/*
 * Broadcom B43 wireless driver
 * DMA ringbuffer and descriptor allocation/management
 *
 * Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
 *
 * Some code in this file is derived from the b44.c driver
 * Copyright (C) 2002 David S. Miller
 * Copyright (C) Pekka Pietikainen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
    struct b43_dmadesc32 *desc;

    *meta = &(ring->meta[slot]);
    desc = ring->descbase;
    desc = &(desc[slot]);

    return (struct b43_dmadesc_generic *)desc;
}
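
/* Program one 32bit hardware descriptor: fold the SSB address translation
 * bits into the buffer address and set up the byte count and the
 * FRAMESTART/FRAMEEND/IRQ/DTABLEEND control flags for this slot. */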
static void op32_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
    struct b43_dmadesc32 *descbase = ring->descbase;
    int slot;
    u32 ctl;
    u32 addr;
    u32 addrext;

    slot = (int)(&(desc->dma32) - descbase);
    B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

    addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
    addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
        >> SSB_DMA_TRANSLATION_SHIFT;
    addr |= ssb_dma_translation(ring->dev->dev);
    ctl = (bufsize - ring->frameoffset)
        & B43_DMA32_DCTL_BYTECNT;
    if (slot == ring->nr_slots - 1)
        ctl |= B43_DMA32_DCTL_DTABLEEND;
    if (start)
        ctl |= B43_DMA32_DCTL_FRAMESTART;
    if (end)
        ctl |= B43_DMA32_DCTL_FRAMEEND;
    if (irq)
        ctl |= B43_DMA32_DCTL_IRQ;
    ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
        & B43_DMA32_DCTL_ADDREXT_MASK;

    desc->dma32.control = cpu_to_le32(ctl);
    desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
    b43_dma_write(ring, B43_DMA32_TXINDEX,
                  (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
    b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                  | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
    b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                  & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
    u32 val;

    val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
    val &= B43_DMA32_RXDPTR;

    return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
    b43_dma_write(ring, B43_DMA32_RXINDEX,
                  (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
    .idx2desc = op32_idx2desc,
    .fill_descriptor = op32_fill_descriptor,
    .poke_tx = op32_poke_tx,
    .tx_suspend = op32_tx_suspend,
    .tx_resume = op32_tx_resume,
    .get_current_rxslot = op32_get_current_rxslot,
    .set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
    struct b43_dmadesc64 *desc;

    *meta = &(ring->meta[slot]);
    desc = ring->descbase;
    desc = &(desc[slot]);

    return (struct b43_dmadesc_generic *)desc;
}
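
/* 64bit counterpart of op32_fill_descriptor. The DMA address is split
 * into low and high words; the address extension bits and the byte
 * count live in the second control word. */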
static void op64_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
    struct b43_dmadesc64 *descbase = ring->descbase;
    int slot;
    u32 ctl0 = 0, ctl1 = 0;
    u32 addrlo, addrhi;
    u32 addrext;

    slot = (int)(&(desc->dma64) - descbase);
    B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

    addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
    addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
    addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
        >> SSB_DMA_TRANSLATION_SHIFT;
    addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
    if (slot == ring->nr_slots - 1)
        ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
    if (start)
        ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
    if (end)
        ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
    if (irq)
        ctl0 |= B43_DMA64_DCTL0_IRQ;
    ctl1 |= (bufsize - ring->frameoffset)
        & B43_DMA64_DCTL1_BYTECNT;
    ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
        & B43_DMA64_DCTL1_ADDREXT_MASK;

    desc->dma64.control0 = cpu_to_le32(ctl0);
    desc->dma64.control1 = cpu_to_le32(ctl1);
    desc->dma64.address_low = cpu_to_le32(addrlo);
    desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
    b43_dma_write(ring, B43_DMA64_TXINDEX,
                  (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
    b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                  | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
    b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                  & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
    u32 val;

    val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
    val &= B43_DMA64_RXSTATDPTR;

    return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
    b43_dma_write(ring, B43_DMA64_RXINDEX,
                  (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
    .idx2desc = op64_idx2desc,
    .fill_descriptor = op64_fill_descriptor,
    .poke_tx = op64_poke_tx,
    .tx_suspend = op64_tx_suspend,
    .tx_resume = op64_tx_resume,
    .get_current_rxslot = op64_get_current_rxslot,
    .set_current_rxslot = op64_set_current_rxslot,
};
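
/* Ring index arithmetic. Slot numbers wrap at nr_slots, so the
 * descriptor table behaves as a circular buffer. next_slot() also
 * accepts -1, the "empty TX ring" value of ring->current_slot. */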
static inline int free_slots(struct b43_dmaring *ring)
{
    return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
    B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
    if (slot == ring->nr_slots - 1)
        return 0;
    return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
    B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
    if (slot == 0)
        return ring->nr_slots - 1;
    return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
                                  int current_used_slots)
{
    if (current_used_slots <= ring->max_used_slots)
        return;
    ring->max_used_slots = current_used_slots;
    if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
        b43dbg(ring->dev->wl,
               "max_used_slots increased to %d on %s ring %d\n",
               ring->max_used_slots,
               ring->tx ? "TX" : "RX", ring->index);
    }
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
    int slot;

    B43_WARN_ON(!ring->tx);
    B43_WARN_ON(ring->stopped);
    B43_WARN_ON(free_slots(ring) == 0);

    slot = next_slot(ring, ring->current_slot);
    ring->current_slot = slot;
    ring->used_slots++;

    update_max_used_slots(ring, ring->used_slots);

    return slot;
}

/* Mac80211-queue to b43-ring mapping */
static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev,
                                              int queue_priority)
{
    struct b43_dmaring *ring;

    /*FIXME: For now we always run on TX-ring-1 */
    return dev->dma.tx_ring1;

    /* 0 = highest priority */
    switch (queue_priority) {
    default:
        B43_WARN_ON(1);
        /* fallthrough */
    case 0:
        ring = dev->dma.tx_ring3;
        break;
    case 1:
        ring = dev->dma.tx_ring2;
        break;
    case 2:
        ring = dev->dma.tx_ring1;
        break;
    case 3:
        ring = dev->dma.tx_ring0;
        break;
    }

    return ring;
}

/* b43-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43_dmaring *ring)
{
    static const u8 idx_to_prio[] = { 3, 2, 1, 0, };
    unsigned int index;

    /*FIXME: have only one queue, for now */
    return 0;

    index = ring->index;
    if (B43_WARN_ON(index >= ARRAY_SIZE(idx_to_prio)))
        index = 0;
    return idx_to_prio[index];
}

u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
{
    static const u16 map64[] = {
        B43_MMIO_DMA64_BASE0,
        B43_MMIO_DMA64_BASE1,
        B43_MMIO_DMA64_BASE2,
        B43_MMIO_DMA64_BASE3,
        B43_MMIO_DMA64_BASE4,
        B43_MMIO_DMA64_BASE5,
    };
    static const u16 map32[] = {
        B43_MMIO_DMA32_BASE0,
        B43_MMIO_DMA32_BASE1,
        B43_MMIO_DMA32_BASE2,
        B43_MMIO_DMA32_BASE3,
        B43_MMIO_DMA32_BASE4,
        B43_MMIO_DMA32_BASE5,
    };

    if (dma64bit) {
        B43_WARN_ON(!(controller_idx >= 0 &&
                      controller_idx < ARRAY_SIZE(map64)));
        return map64[controller_idx];
    }
    B43_WARN_ON(!(controller_idx >= 0 &&
                  controller_idx < ARRAY_SIZE(map32)));
    return map32[controller_idx];
}
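
/* DMA mapping helpers. The "tx" flag selects the direction: buffers the
 * device reads (TX) are mapped DMA_TO_DEVICE, buffers the device writes
 * (RX) are mapped DMA_FROM_DEVICE. */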
static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
                          unsigned char *buf, size_t len, int tx)
{
    dma_addr_t dmaaddr;

    if (tx) {
        dmaaddr = dma_map_single(ring->dev->dev->dev,
                                 buf, len, DMA_TO_DEVICE);
    } else {
        dmaaddr = dma_map_single(ring->dev->dev->dev,
                                 buf, len, DMA_FROM_DEVICE);
    }

    return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
                      dma_addr_t addr, size_t len, int tx)
{
    if (tx) {
        dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
    } else {
        dma_unmap_single(ring->dev->dev->dev,
                         addr, len, DMA_FROM_DEVICE);
    }
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
                             dma_addr_t addr, size_t len)
{
    B43_WARN_ON(ring->tx);
    dma_sync_single_for_cpu(ring->dev->dev->dev,
                            addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
                                dma_addr_t addr, size_t len)
{
    B43_WARN_ON(ring->tx);
    dma_sync_single_for_device(ring->dev->dev->dev,
                               addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
                            struct b43_dmadesc_meta *meta)
{
    if (meta->skb) {
        dev_kfree_skb_any(meta->skb);
        meta->skb = NULL;
    }
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
    struct device *dev = ring->dev->dev->dev;
    gfp_t flags = GFP_KERNEL;

    /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
     * alignment and 8K buffers for 64-bit DMA with 8K alignment.
     * Testing has shown that 4K is sufficient for the latter as long
     * as the buffer does not cross an 8K boundary.
     *
     * For unknown reasons - possibly a hardware error - the BCM4311
     * rev 02, which uses 64-bit DMA, needs the ring buffer in very
     * low memory, which accounts for the GFP_DMA flag below.
     */
    if (ring->dma64)
        flags |= GFP_DMA;
    ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
                                        &(ring->dmabase), flags);
    if (!ring->descbase) {
        b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
        return -ENOMEM;
    }
    memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

    return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
    struct device *dev = ring->dev->dev->dev;

    dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
                      ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
{
    int i;
    u32 value;
    u16 offset;

    might_sleep();

    offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
    b43_write32(dev, mmio_base + offset, 0);
    for (i = 0; i < 10; i++) {
        offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS;
        value = b43_read32(dev, mmio_base + offset);
        if (dma64) {
            value &= B43_DMA64_RXSTAT;
            if (value == B43_DMA64_RXSTAT_DISABLED) {
                i = -1;
                break;
            }
        } else {
            value &= B43_DMA32_RXSTATE;
            if (value == B43_DMA32_RXSTAT_DISABLED) {
                i = -1;
                break;
            }
        }
        msleep(1);
    }
    if (i != -1) {
        b43err(dev->wl, "DMA RX reset timed out\n");
        return -ENODEV;
    }

    return 0;
}

/* Reset the TX DMA channel */
int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
{
    int i;
    u32 value;
    u16 offset;

    might_sleep();

    for (i = 0; i < 10; i++) {
        offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
        value = b43_read32(dev, mmio_base + offset);
        if (dma64) {
            value &= B43_DMA64_TXSTAT;
            if (value == B43_DMA64_TXSTAT_DISABLED ||
                value == B43_DMA64_TXSTAT_IDLEWAIT ||
                value == B43_DMA64_TXSTAT_STOPPED)
                break;
        } else {
            value &= B43_DMA32_TXSTATE;
            if (value == B43_DMA32_TXSTAT_DISABLED ||
                value == B43_DMA32_TXSTAT_IDLEWAIT ||
                value == B43_DMA32_TXSTAT_STOPPED)
                break;
        }
        msleep(1);
    }
    offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
    b43_write32(dev, mmio_base + offset, 0);
    for (i = 0; i < 10; i++) {
        offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
        value = b43_read32(dev, mmio_base + offset);
        if (dma64) {
            value &= B43_DMA64_TXSTAT;
            if (value == B43_DMA64_TXSTAT_DISABLED) {
                i = -1;
                break;
            }
        } else {
            value &= B43_DMA32_TXSTATE;
            if (value == B43_DMA32_TXSTAT_DISABLED) {
                i = -1;
                break;
            }
        }
        msleep(1);
    }
    if (i != -1) {
        b43err(dev->wl, "DMA TX reset timed out\n");
        return -ENODEV;
    }
    /* ensure the reset is completed. */
    msleep(1);

    return 0;
}
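
/* Allocate and map a receive buffer for one descriptor slot and point
 * the hardware descriptor at it. The length and cookie fields at the
 * start of the buffer are cleared so the RX path can later detect
 * whether the device has actually written to the buffer. */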
static int setup_rx_descbuffer(struct b43_dmaring *ring,
                               struct b43_dmadesc_generic *desc,
                               struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
    struct b43_rxhdr_fw4 *rxhdr;
    struct b43_hwtxstatus *txstat;
    dma_addr_t dmaaddr;
    struct sk_buff *skb;

    B43_WARN_ON(ring->tx);

    skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
    if (unlikely(!skb))
        return -ENOMEM;
    dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
    if (dma_mapping_error(dmaaddr)) {
        /* ugh. try to realloc in zone_dma */
        gfp_flags |= GFP_DMA;

        dev_kfree_skb_any(skb);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
            return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data,
                                 ring->rx_buffersize, 0);
    }

    if (dma_mapping_error(dmaaddr)) {
        dev_kfree_skb_any(skb);
        return -EIO;
    }

    meta->skb = skb;
    meta->dmaaddr = dmaaddr;
    ring->ops->fill_descriptor(ring, desc, dmaaddr,
                               ring->rx_buffersize, 0, 0, 0);

    rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
    rxhdr->frame_len = 0;
    txstat = (struct b43_hwtxstatus *)(skb->data);
    txstat->cookie = 0;

    return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
    int i, err = -ENOMEM;
    struct b43_dmadesc_generic *desc;
    struct b43_dmadesc_meta *meta;

    for (i = 0; i < ring->nr_slots; i++) {
        desc = ring->ops->idx2desc(ring, i, &meta);

        err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
        if (err) {
            b43err(ring->dev->wl,
                   "Failed to allocate initial descbuffers\n");
            goto err_unwind;
        }
    }
    mb();
    ring->used_slots = ring->nr_slots;
    err = 0;
out:
    return err;

err_unwind:
    for (i--; i >= 0; i--) {
        desc = ring->ops->idx2desc(ring, i, &meta);

        unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
        dev_kfree_skb(meta->skb);
    }
    goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
    int err = 0;
    u32 value;
    u32 addrext;
    u32 trans = ssb_dma_translation(ring->dev->dev);

    if (ring->tx) {
        if (ring->dma64) {
            u64 ringbase = (u64) (ring->dmabase);

            addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                >> SSB_DMA_TRANSLATION_SHIFT;
            value = B43_DMA64_TXENABLE;
            value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
                & B43_DMA64_TXADDREXT_MASK;
            b43_dma_write(ring, B43_DMA64_TXCTL, value);
            b43_dma_write(ring, B43_DMA64_TXRINGLO,
                          (ringbase & 0xFFFFFFFF));
            b43_dma_write(ring, B43_DMA64_TXRINGHI,
                          ((ringbase >> 32) &
                           ~SSB_DMA_TRANSLATION_MASK)
                          | (trans << 1));
        } else {
            u32 ringbase = (u32) (ring->dmabase);

            addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                >> SSB_DMA_TRANSLATION_SHIFT;
            value = B43_DMA32_TXENABLE;
            value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
                & B43_DMA32_TXADDREXT_MASK;
            b43_dma_write(ring, B43_DMA32_TXCTL, value);
            b43_dma_write(ring, B43_DMA32_TXRING,
                          (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                          | trans);
        }
    } else {
        err = alloc_initial_descbuffers(ring);
        if (err)
            goto out;
        if (ring->dma64) {
            u64 ringbase = (u64) (ring->dmabase);

            addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                >> SSB_DMA_TRANSLATION_SHIFT;
            value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
            value |= B43_DMA64_RXENABLE;
            value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
                & B43_DMA64_RXADDREXT_MASK;
            b43_dma_write(ring, B43_DMA64_RXCTL, value);
            b43_dma_write(ring, B43_DMA64_RXRINGLO,
                          (ringbase & 0xFFFFFFFF));
            b43_dma_write(ring, B43_DMA64_RXRINGHI,
                          ((ringbase >> 32) &
                           ~SSB_DMA_TRANSLATION_MASK)
                          | (trans << 1));
            b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
                          sizeof(struct b43_dmadesc64));
        } else {
            u32 ringbase = (u32) (ring->dmabase);

            addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                >> SSB_DMA_TRANSLATION_SHIFT;
            value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
            value |= B43_DMA32_RXENABLE;
            value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
                & B43_DMA32_RXADDREXT_MASK;
            b43_dma_write(ring, B43_DMA32_RXCTL, value);
            b43_dma_write(ring, B43_DMA32_RXRING,
                          (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                          | trans);
            b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
                          sizeof(struct b43_dmadesc32));
        }
    }

out:
    return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
    if (ring->tx) {
        b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
                                   ring->dma64);
        if (ring->dma64) {
            b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
            b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
        } else
            b43_dma_write(ring, B43_DMA32_TXRING, 0);
    } else {
        b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
                                   ring->dma64);
        if (ring->dma64) {
            b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
            b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
        } else
            b43_dma_write(ring, B43_DMA32_RXRING, 0);
    }
}
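
/* Unmap and free every descriptor buffer that is still attached to the
 * ring. A missing skb is only legitimate on TX rings (unused or
 * header-only slots); on RX rings every slot carries a buffer. */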
static void free_all_descbuffers(struct b43_dmaring *ring)
{
    struct b43_dmadesc_generic *desc;
    struct b43_dmadesc_meta *meta;
    int i;

    if (!ring->used_slots)
        return;
    for (i = 0; i < ring->nr_slots; i++) {
        desc = ring->ops->idx2desc(ring, i, &meta);

        if (!meta->skb) {
            B43_WARN_ON(!ring->tx);
            continue;
        }
        if (ring->tx) {
            unmap_descbuffer(ring, meta->dmaaddr,
                             meta->skb->len, 1);
        } else {
            unmap_descbuffer(ring, meta->dmaaddr,
                             ring->rx_buffersize, 0);
        }
        free_descriptor_buffer(ring, meta);
    }
}
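
/* Probe the widest DMA mask this core supports. A 64bit capable core
 * advertises itself in the SSB TMSHIGH register. Otherwise, write the
 * ADDREXT bits of the 32bit engine and read them back: if they stick,
 * a 32bit mask works; if not, only 30bit addressing is safe. */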
static u64 supported_dma_mask(struct b43_wldev *dev)
{
    u32 tmp;
    u16 mmio_base;

    tmp = b43_read32(dev, SSB_TMSHIGH);
    if (tmp & SSB_TMSHIGH_DMA64)
        return DMA_64BIT_MASK;
    mmio_base = b43_dmacontroller_base(0, 0);
    b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
    tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
    if (tmp & B43_DMA32_TXADDREXT_MASK)
        return DMA_32BIT_MASK;

    return DMA_30BIT_MASK;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                                      int controller_index,
                                      int for_tx, int dma64)
{
    struct b43_dmaring *ring;
    int err;
    int nr_slots;
    dma_addr_t dma_test;

    ring = kzalloc(sizeof(*ring), GFP_KERNEL);
    if (!ring)
        goto out;

    nr_slots = B43_RXRING_SLOTS;
    if (for_tx)
        nr_slots = B43_TXRING_SLOTS;

    ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
                         GFP_KERNEL);
    if (!ring->meta)
        goto err_kfree_ring;
    if (for_tx) {
        ring->txhdr_cache = kcalloc(nr_slots,
                                    sizeof(struct b43_txhdr_fw4),
                                    GFP_KERNEL);
        if (!ring->txhdr_cache)
            goto err_kfree_meta;

        /* test for ability to dma to txhdr_cache */
        dma_test = dma_map_single(dev->dev->dev,
                                  ring->txhdr_cache,
                                  sizeof(struct b43_txhdr_fw4),
                                  DMA_TO_DEVICE);

        if (dma_mapping_error(dma_test)) {
            /* ugh realloc */
            kfree(ring->txhdr_cache);
            ring->txhdr_cache = kcalloc(nr_slots,
                                        sizeof(struct b43_txhdr_fw4),
                                        GFP_KERNEL | GFP_DMA);
            if (!ring->txhdr_cache)
                goto err_kfree_meta;

            dma_test = dma_map_single(dev->dev->dev,
                                      ring->txhdr_cache,
                                      sizeof(struct b43_txhdr_fw4),
                                      DMA_TO_DEVICE);

            if (dma_mapping_error(dma_test))
                goto err_kfree_txhdr_cache;
        }

        dma_unmap_single(dev->dev->dev,
                         dma_test, sizeof(struct b43_txhdr_fw4),
                         DMA_TO_DEVICE);
    }

    ring->dev = dev;
    ring->nr_slots = nr_slots;
    ring->mmio_base = b43_dmacontroller_base(dma64, controller_index);
    ring->index = controller_index;
    ring->dma64 = !!dma64;
    if (dma64)
        ring->ops = &dma64_ops;
    else
        ring->ops = &dma32_ops;
    if (for_tx) {
        ring->tx = 1;
        ring->current_slot = -1;
    } else {
        if (ring->index == 0) {
            ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
            ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
        } else if (ring->index == 3) {
            ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
            ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
        } else
            B43_WARN_ON(1);
    }
    spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
    ring->last_injected_overflow = jiffies;
#endif

    err = alloc_ringmemory(ring);
    if (err)
        goto err_kfree_txhdr_cache;
    err = dmacontroller_setup(ring);
    if (err)
        goto err_free_ringmemory;

out:
    return ring;

err_free_ringmemory:
    free_ringmemory(ring);
err_kfree_txhdr_cache:
    kfree(ring->txhdr_cache);
err_kfree_meta:
    kfree(ring->meta);
err_kfree_ring:
    kfree(ring);
    ring = NULL;
    goto out;
}

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring)
{
    if (!ring)
        return;

    b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
           (ring->dma64) ? "64" : "32",
           ring->mmio_base,
           (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
    /* Device IRQs are disabled prior to entering this function,
     * so no need to take care of concurrency with rx handler stuff.
     */
    dmacontroller_cleanup(ring);
    free_all_descbuffers(ring);
    free_ringmemory(ring);

    kfree(ring->txhdr_cache);
    kfree(ring->meta);
    kfree(ring);
}

void b43_dma_free(struct b43_wldev *dev)
{
    struct b43_dma *dma = &dev->dma;

    b43_destroy_dmaring(dma->rx_ring3);
    dma->rx_ring3 = NULL;
    b43_destroy_dmaring(dma->rx_ring0);
    dma->rx_ring0 = NULL;

    b43_destroy_dmaring(dma->tx_ring5);
    dma->tx_ring5 = NULL;
    b43_destroy_dmaring(dma->tx_ring4);
    dma->tx_ring4 = NULL;
    b43_destroy_dmaring(dma->tx_ring3);
    dma->tx_ring3 = NULL;
    b43_destroy_dmaring(dma->tx_ring2);
    dma->tx_ring2 = NULL;
    b43_destroy_dmaring(dma->tx_ring1);
    dma->tx_ring1 = NULL;
    b43_destroy_dmaring(dma->tx_ring0);
    dma->tx_ring0 = NULL;
}
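
/* Set the DMA mask and bring up all TX and RX rings. The second RX ring
 * (index 3) is only set up on core revisions < 5; it carries hardware
 * TX status reports (see dma_rx()) rather than received frames. */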
int b43_dma_init(struct b43_wldev *dev)
{
    struct b43_dma *dma = &dev->dma;
    struct b43_dmaring *ring;
    int err;
    u64 dmamask;
    int dma64 = 0;

    dmamask = supported_dma_mask(dev);
    if (dmamask == DMA_64BIT_MASK)
        dma64 = 1;

    err = ssb_dma_set_mask(dev->dev, dmamask);
    if (err) {
        b43err(dev->wl, "The machine/kernel does not support "
               "the required DMA mask (0x%08X%08X)\n",
               (unsigned int)((dmamask & 0xFFFFFFFF00000000ULL) >> 32),
               (unsigned int)(dmamask & 0x00000000FFFFFFFFULL));
        return -EOPNOTSUPP;
    }

    err = -ENOMEM;
    /* setup TX DMA channels. */
    ring = b43_setup_dmaring(dev, 0, 1, dma64);
    if (!ring)
        goto out;
    dma->tx_ring0 = ring;

    ring = b43_setup_dmaring(dev, 1, 1, dma64);
    if (!ring)
        goto err_destroy_tx0;
    dma->tx_ring1 = ring;

    ring = b43_setup_dmaring(dev, 2, 1, dma64);
    if (!ring)
        goto err_destroy_tx1;
    dma->tx_ring2 = ring;

    ring = b43_setup_dmaring(dev, 3, 1, dma64);
    if (!ring)
        goto err_destroy_tx2;
    dma->tx_ring3 = ring;

    ring = b43_setup_dmaring(dev, 4, 1, dma64);
    if (!ring)
        goto err_destroy_tx3;
    dma->tx_ring4 = ring;

    ring = b43_setup_dmaring(dev, 5, 1, dma64);
    if (!ring)
        goto err_destroy_tx4;
    dma->tx_ring5 = ring;

    /* setup RX DMA channels. */
    ring = b43_setup_dmaring(dev, 0, 0, dma64);
    if (!ring)
        goto err_destroy_tx5;
    dma->rx_ring0 = ring;

    if (dev->dev->id.revision < 5) {
        ring = b43_setup_dmaring(dev, 3, 0, dma64);
        if (!ring)
            goto err_destroy_rx0;
        dma->rx_ring3 = ring;
    }

    b43dbg(dev->wl, "%d-bit DMA initialized\n",
           (dmamask == DMA_64BIT_MASK) ? 64 :
           (dmamask == DMA_32BIT_MASK) ? 32 : 30);
    err = 0;
out:
    return err;

err_destroy_rx0:
    b43_destroy_dmaring(dma->rx_ring0);
    dma->rx_ring0 = NULL;
err_destroy_tx5:
    b43_destroy_dmaring(dma->tx_ring5);
    dma->tx_ring5 = NULL;
err_destroy_tx4:
    b43_destroy_dmaring(dma->tx_ring4);
    dma->tx_ring4 = NULL;
err_destroy_tx3:
    b43_destroy_dmaring(dma->tx_ring3);
    dma->tx_ring3 = NULL;
err_destroy_tx2:
    b43_destroy_dmaring(dma->tx_ring2);
    dma->tx_ring2 = NULL;
err_destroy_tx1:
    b43_destroy_dmaring(dma->tx_ring1);
    dma->tx_ring1 = NULL;
err_destroy_tx0:
    b43_destroy_dmaring(dma->tx_ring0);
    dma->tx_ring0 = NULL;
    goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
    u16 cookie = 0x1000;

    /* Use the upper 4 bits of the cookie as
     * DMA controller ID and store the slot number
     * in the lower 12 bits.
     * Note that the cookie must never be 0, as this
     * is a special value used in RX path.
     * It can also not be 0xFFFF because that is special
     * for multicast frames.
     */
    switch (ring->index) {
    case 0:
        cookie = 0x1000;
        break;
    case 1:
        cookie = 0x2000;
        break;
    case 2:
        cookie = 0x3000;
        break;
    case 3:
        cookie = 0x4000;
        break;
    case 4:
        cookie = 0x5000;
        break;
    case 5:
        cookie = 0x6000;
        break;
    default:
        B43_WARN_ON(1);
    }
    B43_WARN_ON(slot & ~0x0FFF);
    cookie |= (u16) slot;

    return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
    struct b43_dma *dma = &dev->dma;
    struct b43_dmaring *ring = NULL;

    switch (cookie & 0xF000) {
    case 0x1000:
        ring = dma->tx_ring0;
        break;
    case 0x2000:
        ring = dma->tx_ring1;
        break;
    case 0x3000:
        ring = dma->tx_ring2;
        break;
    case 0x4000:
        ring = dma->tx_ring3;
        break;
    case 0x5000:
        ring = dma->tx_ring4;
        break;
    case 0x6000:
        ring = dma->tx_ring5;
        break;
    default:
        B43_WARN_ON(1);
    }
    *slot = (cookie & 0x0FFF);
    B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

    return ring;
}
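
/* Queue one frame for transmission. Each frame occupies two ring slots:
 * the first holds the device TX header, the second the frame body. If
 * the body cannot be mapped for DMA, it is copied into a GFP_DMA bounce
 * buffer and mapped from there. */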
static int dma_tx_fragment(struct b43_dmaring *ring,
                           struct sk_buff *skb,
                           struct ieee80211_tx_control *ctl)
{
    const struct b43_dma_ops *ops = ring->ops;
    u8 *header;
    int slot;
    int err;
    struct b43_dmadesc_generic *desc;
    struct b43_dmadesc_meta *meta;
    struct b43_dmadesc_meta *meta_hdr;
    struct sk_buff *bounce_skb;
    u16 cookie;

#define SLOTS_PER_PACKET  2
    B43_WARN_ON(skb_shinfo(skb)->nr_frags);

    /* Get a slot for the header. */
    slot = request_slot(ring);
    desc = ops->idx2desc(ring, slot, &meta_hdr);
    memset(meta_hdr, 0, sizeof(*meta_hdr));

    header = &(ring->txhdr_cache[slot * sizeof(struct b43_txhdr_fw4)]);
    cookie = generate_cookie(ring, slot);
    b43_generate_txhdr(ring->dev, header,
                       skb->data, skb->len, ctl, cookie);

    meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                       sizeof(struct b43_txhdr_fw4), 1);
    if (dma_mapping_error(meta_hdr->dmaaddr))
        return -EIO;
    ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
                         sizeof(struct b43_txhdr_fw4), 1, 0, 0);

    /* Get a slot for the payload. */
    slot = request_slot(ring);
    desc = ops->idx2desc(ring, slot, &meta);
    memset(meta, 0, sizeof(*meta));

    memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
    meta->skb = skb;
    meta->is_last_fragment = 1;

    meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
    /* create a bounce buffer in zone_dma on mapping failure. */
    if (dma_mapping_error(meta->dmaaddr)) {
        bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
        if (!bounce_skb) {
            err = -ENOMEM;
            goto out_unmap_hdr;
        }

        memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
        dev_kfree_skb_any(skb);
        skb = bounce_skb;
        meta->skb = skb;
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        if (dma_mapping_error(meta->dmaaddr)) {
            err = -EIO;
            goto out_free_bounce;
        }
    }

    ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

    if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
        /* Tell the firmware about the cookie of the last
         * mcast frame, so it can clear the more-data bit in it. */
        b43_shm_write16(ring->dev, B43_SHM_SHARED,
                        B43_SHM_SH_MCASTCOOKIE, cookie);
    }
    /* Now transfer the whole frame. */
    wmb();
    ops->poke_tx(ring, next_slot(ring, slot));
    return 0;

out_free_bounce:
    dev_kfree_skb_any(skb);
out_unmap_hdr:
    unmap_descbuffer(ring, meta_hdr->dmaaddr,
                     sizeof(struct b43_txhdr_fw4), 1);
    return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
    if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
        /* Check if we should inject another ringbuffer overflow
         * to test handling of this situation in the stack. */
        unsigned long next_overflow;

        next_overflow = ring->last_injected_overflow + HZ;
        if (time_after(jiffies, next_overflow)) {
            ring->last_injected_overflow = jiffies;
            b43dbg(ring->dev->wl,
                   "Injecting TX ring overflow on "
                   "DMA controller %d\n", ring->index);
            return 1;
        }
    }
#endif /* CONFIG_B43_DEBUG */
    return 0;
}

int b43_dma_tx(struct b43_wldev *dev,
               struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
    struct b43_dmaring *ring;
    struct ieee80211_hdr *hdr;
    int err = 0;
    unsigned long flags;

    if (unlikely(skb->len < 2 + 2 + 6)) {
        /* Too short, this can't be a valid frame. */
        return -EINVAL;
    }
    hdr = (struct ieee80211_hdr *)skb->data;
    if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
        /* The multicast ring will be sent after the DTIM */
        ring = dev->dma.tx_ring4;
        /* Set the more-data bit. Ucode will clear it on
         * the last frame for us. */
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
    } else {
        /* Decide by priority where to put this frame. */
        ring = priority_to_txring(dev, ctl->queue);
    }

    spin_lock_irqsave(&ring->lock, flags);
    B43_WARN_ON(!ring->tx);
    if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
        b43warn(dev->wl, "DMA queue overflow\n");
        err = -ENOSPC;
        goto out_unlock;
    }
    /* Check if the queue was stopped in mac80211,
     * but we got called nevertheless.
     * That would be a mac80211 bug. */
    B43_WARN_ON(ring->stopped);

    err = dma_tx_fragment(ring, skb, ctl);
    if (unlikely(err)) {
        b43err(dev->wl, "DMA tx mapping failure\n");
        goto out_unlock;
    }
    ring->nr_tx_packets++;
    if ((free_slots(ring) < SLOTS_PER_PACKET) ||
        should_inject_overflow(ring)) {
        /* This TX ring is full. */
        ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
        ring->stopped = 1;
        if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
            b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
        }
    }
out_unlock:
    spin_unlock_irqrestore(&ring->lock, flags);

    return err;
}
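
/* Handle a device TX status report. Walk the ring from the slot encoded
 * in the cookie, unmap the header and body buffers, hand the status for
 * the last fragment to mac80211 and release the slots. If the ring was
 * stopped because it was full, wake its queue again. */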
void b43_dma_handle_txstatus(struct b43_wldev *dev,
                             const struct b43_txstatus *status)
{
    const struct b43_dma_ops *ops;
    struct b43_dmaring *ring;
    struct b43_dmadesc_generic *desc;
    struct b43_dmadesc_meta *meta;
    int slot;

    ring = parse_cookie(dev, status->cookie, &slot);
    if (unlikely(!ring))
        return;
    B43_WARN_ON(!irqs_disabled());
    spin_lock(&ring->lock);

    B43_WARN_ON(!ring->tx);
    ops = ring->ops;
    while (1) {
        B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
        desc = ops->idx2desc(ring, slot, &meta);

        if (meta->skb)
            unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
                             1);
        else
            unmap_descbuffer(ring, meta->dmaaddr,
                             sizeof(struct b43_txhdr_fw4), 1);

        if (meta->is_last_fragment) {
            B43_WARN_ON(!meta->skb);
            /* Call back to inform the ieee80211 subsystem about the
             * status of the transmission.
             * Some fields of txstat are already filled in dma_tx().
             */
            if (status->acked) {
                meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
            } else {
                if (!(meta->txstat.control.flags
                      & IEEE80211_TXCTL_NO_ACK))
                    meta->txstat.excessive_retries = 1;
            }
            if (status->frame_count == 0) {
                /* The frame was not transmitted at all. */
                meta->txstat.retry_count = 0;
            } else
                meta->txstat.retry_count = status->frame_count - 1;
            ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
                                        &(meta->txstat));
            /* skb is freed by ieee80211_tx_status_irqsafe() */
            meta->skb = NULL;
        } else {
            /* No need to call free_descriptor_buffer here, as
             * this is only the txhdr, which is not allocated.
             */
            B43_WARN_ON(meta->skb);
        }

        /* Everything unmapped and freed. So it's not used anymore. */
        ring->used_slots--;

        if (meta->is_last_fragment)
            break;
        slot = next_slot(ring, slot);
    }
    dev->stats.last_tx = jiffies;
    if (ring->stopped) {
        B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
        ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
        ring->stopped = 0;
        if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
            b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
        }
    }

    spin_unlock(&ring->lock);
}

void b43_dma_get_tx_stats(struct b43_wldev *dev,
                          struct ieee80211_tx_queue_stats *stats)
{
    const int nr_queues = dev->wl->hw->queues;
    struct b43_dmaring *ring;
    struct ieee80211_tx_queue_stats_data *data;
    unsigned long flags;
    int i;

    for (i = 0; i < nr_queues; i++) {
        data = &(stats->data[i]);
        ring = priority_to_txring(dev, i);

        spin_lock_irqsave(&ring->lock, flags);
        data->len = ring->used_slots / SLOTS_PER_PACKET;
        data->limit = ring->nr_slots / SLOTS_PER_PACKET;
        data->count = ring->nr_tx_packets;
        spin_unlock_irqrestore(&ring->lock, flags);
    }
}
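
/* Process one received buffer. On ring 3 the buffer carries a hardware
 * TX status report; otherwise it holds an RX header followed by the
 * frame. The frame is passed up the stack and a fresh buffer is mapped
 * into the slot, or the old buffer is simply recycled on errors. */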
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
    const struct b43_dma_ops *ops = ring->ops;
    struct b43_dmadesc_generic *desc;
    struct b43_dmadesc_meta *meta;
    struct b43_rxhdr_fw4 *rxhdr;
    struct sk_buff *skb;
    u16 len;
    int err;
    dma_addr_t dmaaddr;

    desc = ops->idx2desc(ring, *slot, &meta);

    sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
    skb = meta->skb;

    if (ring->index == 3) {
        /* We received an xmit status. */
        struct b43_hwtxstatus *hw = (struct b43_hwtxstatus *)skb->data;
        int i = 0;

        while (hw->cookie == 0) {
            if (i > 100)
                break;
            i++;
            udelay(2);
            barrier();
        }
        b43_handle_hwtxstatus(ring->dev, hw);
        /* recycle the descriptor buffer. */
        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                   ring->rx_buffersize);

        return;
    }
    rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
    len = le16_to_cpu(rxhdr->frame_len);
    if (len == 0) {
        int i = 0;

        do {
            udelay(2);
            barrier();
            len = le16_to_cpu(rxhdr->frame_len);
        } while (len == 0 && i++ < 5);
        if (unlikely(len == 0)) {
            /* recycle the descriptor buffer. */
            sync_descbuffer_for_device(ring, meta->dmaaddr,
                                       ring->rx_buffersize);
            goto drop;
        }
    }
    if (unlikely(len > ring->rx_buffersize)) {
        /* The data did not fit into one descriptor buffer
         * and is split over multiple buffers.
         * This should never happen, as we try to allocate buffers
         * big enough. So simply ignore this packet.
         */
        int cnt = 0;
        s32 tmp = len;

        while (1) {
            desc = ops->idx2desc(ring, *slot, &meta);
            /* recycle the descriptor buffer. */
            sync_descbuffer_for_device(ring, meta->dmaaddr,
                                       ring->rx_buffersize);
            *slot = next_slot(ring, *slot);
            cnt++;
            tmp -= ring->rx_buffersize;
            if (tmp <= 0)
                break;
        }
        b43err(ring->dev->wl, "DMA RX buffer too small "
               "(len: %u, buffer: %u, nr-dropped: %d)\n",
               len, ring->rx_buffersize, cnt);
        goto drop;
    }

    dmaaddr = meta->dmaaddr;
    err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
    if (unlikely(err)) {
        b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
        sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
        goto drop;
    }

    unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
    skb_put(skb, len + ring->frameoffset);
    skb_pull(skb, ring->frameoffset);

    b43_rx(ring->dev, skb, rxhdr);
drop:
    return;
}

void b43_dma_rx(struct b43_dmaring *ring)
{
    const struct b43_dma_ops *ops = ring->ops;
    int slot, current_slot;
    int used_slots = 0;

    B43_WARN_ON(ring->tx);
    current_slot = ops->get_current_rxslot(ring);
    B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

    slot = ring->current_slot;
    for (; slot != current_slot; slot = next_slot(ring, slot)) {
        dma_rx(ring, &slot);
        update_max_used_slots(ring, ++used_slots);
    }
    ops->set_current_rxslot(ring, slot);
    ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
    unsigned long flags;

    spin_lock_irqsave(&ring->lock, flags);
    B43_WARN_ON(!ring->tx);
    ring->ops->tx_suspend(ring);
    spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
    unsigned long flags;

    spin_lock_irqsave(&ring->lock, flags);
    B43_WARN_ON(!ring->tx);
    ring->ops->tx_resume(ring);
    spin_unlock_irqrestore(&ring->lock, flags);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
    b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
    b43_dma_tx_suspend_ring(dev->dma.tx_ring0);
    b43_dma_tx_suspend_ring(dev->dma.tx_ring1);
    b43_dma_tx_suspend_ring(dev->dma.tx_ring2);
    b43_dma_tx_suspend_ring(dev->dma.tx_ring3);
    b43_dma_tx_suspend_ring(dev->dma.tx_ring4);
    b43_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
    b43_dma_tx_resume_ring(dev->dma.tx_ring5);
    b43_dma_tx_resume_ring(dev->dma.tx_ring4);
    b43_dma_tx_resume_ring(dev->dma.tx_ring3);
    b43_dma_tx_resume_ring(dev->dma.tx_ring2);
    b43_dma_tx_resume_ring(dev->dma.tx_ring1);
    b43_dma_tx_resume_ring(dev->dma.tx_ring0);
    b43_power_saving_ctl_bits(dev, 0);
}