
/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>


/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2
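
/* Illustration (not from the original sources): with TX_SLOTS_PER_FRAME == 2,
 * a frame whose txhdr lands in slot N occupies slots N and N+1, so
 * dma_tx_fragment() below calls request_slot() twice per frame and the
 * free-slot checks in b43_dma_tx() compare against TX_SLOTS_PER_FRAME
 * rather than 1. */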

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;
	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
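
/* Worked example for the 32-bit address split above (illustrative numbers,
 * assuming the usual SSB_DMA_TRANSLATION_MASK of 0xC0000000 and shift of 30
 * from the SSB headers): for dmaaddr == 0x8ABC1234 and
 * ssb_dma_translation() == 0x40000000,
 *   addr    = (0x8ABC1234 & ~0xC0000000) | 0x40000000 = 0x4ABC1234
 *   addrext = (0x8ABC1234 &  0xC0000000) >> 30        = 0x2
 * so the descriptor's address word carries the translated bus address and
 * the masked-off top bits travel in the ADDREXT field of the control word. */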

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_TO_DEVICE);
	} else {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_cpu(ring->dev->dev,
				    addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_device(ring->dev->dev,
				       addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 *
	 * The flags here must match the flags in free_ringmemory below!
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
						  B43_DMA_RINGMEMSIZE,
						  &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;

	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
				ring->descbase, ring->dmabase, flags);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
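
/* Example of the address-window check above (illustrative numbers): on a
 * 30-bit engine, a mapping at addr == 0x3FFFF000 with buffersize == 0x2000
 * ends at 0x40001000, which is beyond 1ULL << 30, so the mapping is undone
 * and the callers (setup_rx_descbuffer(), dma_tx_fragment(),
 * b43_setup_dmaring()) retry the allocation from ZONE_DMA. */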

static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}
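
/* Note on the probe above: if the core advertises SSB_TMSHIGH_DMA64, the
 * 64-bit engine is used.  Otherwise the ADDREXT bits are written into the
 * first 32-bit TX control register and read back; they only stick when an
 * engine with address extension is present, so a zero read-back leaves us
 * with the old 30-bit engine. */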

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = ssb_dma_map_single(dev->dev,
					      ring->txhdr_cache,
					      b43_txhdr_size(dev),
					      DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = ssb_dma_map_single(dev->dev,
						      ring->txhdr_cache,
						      b43_txhdr_size(dev),
						      DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		ssb_dma_unmap_single(dev->dev,
				     dma_test, b43_txhdr_size(dev),
				     DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
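
/* do_div(n, base) from <asm/div64.h> divides the 64-bit n in place (n becomes
 * the quotient) and returns the 32-bit remainder, so divide() above evaluates
 * to the quotient and modulo() to the remainder.  For example (illustrative),
 * divide(1234ULL, 10) == 123 and modulo(1234ULL, 10) == 4.  This sidesteps
 * the 64-bit '/' and '%' operators, which are avoided in kernel code on
 * 32-bit architectures. */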

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = ssb_dma_set_mask(dev->dev, mask);
		if (!err)
			break;
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
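
/* Cookie layout example (illustrative): a frame queued on the AC_VI ring
 * (ring->index == 2) with its header in slot 0x01A gets cookie
 * ((2 + 1) << 12) | 0x01A == 0x301A.  parse_cookie() below uses the top
 * nibble (0x3) to find the ring and the low 12 bits to recover the slot. */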

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (b43_modparam_qos) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	unsigned long flags;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(dev, skb_get_queue_mapping(skb));
	}

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43_WARN_ON(ring->stopped);

	if (unlikely(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
		b43warn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

/* Called with IRQs disabled. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	spin_lock(&ring->lock);	/* IRQs are already disabled. */

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			BUG_ON(!meta->skb);

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);

			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	spin_unlock(&ring->lock);
}

void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		ring = select_ring_by_priority(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
		stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
		stats[i].count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));
	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}
#endif /* CONFIG_B43_PIO */