mv_xor.c

/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <plat/mv_xor.h>
#include "mv_xor.h"
static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
	container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)
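
/*
 * The mv_desc_*() helpers below operate on a software descriptor slot
 * (struct mv_xor_desc_slot) and on the hardware descriptor it wraps
 * (desc->hw_desc), which the XOR engine fetches by DMA.
 */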
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}
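
/*
 * Per-channel register accessors: XOR_CURR_DESC(), XOR_NEXT_DESC() and
 * friends (defined in mv_xor.h) resolve to offsets from the channel's
 * memory-mapped register base.
 */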
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));

	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;

	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = (1 << (1 + (chan->idx * 16)));
	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_printk(KERN_ERR, chan->device->common.dev,
			   "error: unsupported operation %d.\n",
			   type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}
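
/*
 * mv_chan_activate() kicks the engine so it starts fetching the
 * descriptor chain programmed via XOR_NEXT_DESC; mv_chan_is_busy()
 * samples the activation register to see whether a chain is still
 * being processed.
 */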
static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(chan->device->common.dev, " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);
	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}
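
/*
 * mv_xor_run_tx_complete_actions - invoke the client callback for a
 * completed descriptor, unmap its DMA buffers (unless the submitter
 * asked us to skip that) and kick any dependent async_tx operations.
 * Returns the cookie of the completed transaction, if any.
 */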
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&mv_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;

			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				addr = mv_desc_get_dest_addr(unmap);
				dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				src_cnt = unmap->unmap_src_cnt;
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	async_tx_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}
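
/*
 * __mv_xor_slot_cleanup - walk the descriptor chain from the oldest
 * entry, run completion actions for everything the hardware has
 * finished, and restart the channel if it went idle with work still
 * queued.
 */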
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	__mv_xor_slot_cleanup(chan);
}
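
/*
 * mv_xor_alloc_slots - grab a run of 'num_slots' free descriptor slots
 * from the channel's pool, starting the search at the last slot that
 * was handed out.  If nothing suitable is found, schedule the cleanup
 * tasklet in the hope of reclaiming slots and return NULL.
 */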
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->async_tx.tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}
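
/*
 * mv_desc_assign_cookie - hand out the next monotonically increasing
 * dmaengine cookie for this channel, skipping the reserved non-positive
 * values.
 */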
static dma_cookie_t
mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
		      struct mv_xor_desc_slot *desc)
{
	dma_cookie_t cookie = mv_chan->common.cookie;

	if (++cookie < 0)
		cookie = 1;
	mv_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}

/************************ DMA engine API functions ****************************/
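
/*
 * mv_xor_tx_submit - queue a prepared descriptor on the channel.  The
 * new group is appended to the software chain and, when the hardware
 * allows it, linked into the running hardware chain; otherwise a fresh
 * hardware chain is started.
 */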
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = mv_desc_assign_cookie(mv_chan, sw_desc);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->async_tx.tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current descriptor is the end of the chain
			 * before the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);
	return cookie;
}
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
				       struct dma_client *client)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	struct mv_xor_platform_data *plat_data =
		mv_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->async_tx.tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
					struct mv_xor_desc_slot,
					slot_node);

	dev_dbg(mv_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}
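
/*
 * mv_xor_prep_dma_memcpy - prepare a single hardware descriptor for a
 * memory-to-memory copy of 'len' bytes.  Lengths below the engine's
 * minimum byte count are rejected by returning NULL.
 */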
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}
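
/*
 * mv_xor_prep_dma_xor - prepare a descriptor that XORs 'src_cnt' source
 * buffers into 'dest'.  Each source address sets the corresponding bit
 * in the descriptor command word via mv_desc_set_src_addr().
 */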
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	dev_dbg(mv_chan->device->common.dev,
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}
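
/*
 * mv_xor_free_chan_resources - drop every descriptor slot allocated for
 * this channel.  Any descriptors still sitting on the chain or the
 * completed list at this point are counted and reported as an error.
 */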
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan->device->common.dev,
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_is_complete - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 */
static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  dma_cookie_t *done,
					  dma_cookie_t *used)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;
	mv_chan->is_complete_cookie = cookie;
	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error addr 0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(chan->device->common.dev,
			"ignore this error\n");
		return;
	}

	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error on chan %d. intr cause 0x%08x.\n",
		   chan->idx, intr_cause);

	mv_dump_xor_regs(chan);

	BUG();
}
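
/*
 * mv_xor_interrupt_handler - top half: report error causes, defer the
 * descriptor cleanup to the tasklet, then acknowledge the end-of-chain
 * interrupt for this channel.
 */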
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}
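
/*
 * mv_xor_issue_pending - activate the channel once enough descriptors
 * have been queued (MV_XOR_THRESHOLD), batching the doorbell writes.
 */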
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				   "Self-test xor failed compare, disabling."
				   " index %d, data %x, expected %x\n", i,
				   ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
static int __devexit mv_xor_remove(struct platform_device *dev)
{
	struct mv_xor_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct mv_xor_chan *mv_chan;
	struct mv_xor_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			  device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		mv_chan = to_mv_xor_chan(chan);
		list_del(&chan->device_node);
	}

	return 0;
}
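
/*
 * mv_xor_probe - per-channel probe: carve out the coherent descriptor
 * pool, register the channel's capabilities (memcpy/memset/xor) with
 * the dmaengine core, hook up its interrupt, and run the self-tests
 * before the device is made visible.
 */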
static int __devinit mv_xor_probe(struct platform_device *pdev)
{
	int ret = 0;
	int irq;
	struct mv_xor_device *adev;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;
	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  plat_data->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt)
		return -ENOMEM;

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;
	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	adev->shared = platform_get_drvdata(plat_data->shared);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_is_tx_complete = mv_xor_is_complete;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	mv_chan->device = adev;
	mv_chan->idx = plat_data->hw_id;
	mv_chan->mmr_base = adev->shared->xor_base;

	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_dma;
	}
	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	INIT_RCU_HEAD(&mv_chan->common.rcu);
	mv_chan->common.device = dma_dev;

	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
		   "( %s%s%s%s)\n",
		   dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		   dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		   dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		   dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			  adev->dma_desc_pool_virt, adev->dma_desc_pool);
out:
	return ret;
}
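
/*
 * mv_xor_conf_mbus_windows - program the engine's MBUS address decoding
 * windows so the XOR unit can reach each populated DRAM chip select
 * described by the mbus_dram_target_info passed in from the platform.
 */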
static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
			 struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
	},
};
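
/*
 * The "shared" platform device maps the register banks used by the XOR
 * channels of an engine; the per-channel mv_xor devices then pick up
 * those mappings through platform data (see mv_xor_probe above).
 */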
static int mv_xor_shared_probe(struct platform_device *pdev)
{
	struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
	struct mv_xor_shared_private *msp;
	struct resource *res;

	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     res->end - res->start + 1);
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  res->end - res->start + 1);
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (msd != NULL && msd->dram != NULL)
		mv_xor_conf_mbus_windows(msp, msd->dram);

	return 0;
}

static int mv_xor_shared_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver mv_xor_shared_driver = {
	.probe		= mv_xor_shared_probe,
	.remove		= mv_xor_shared_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_SHARED_NAME,
	},
};


static int __init mv_xor_init(void)
{
	int rc;

	rc = platform_driver_register(&mv_xor_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv_xor_driver);
		if (rc)
			platform_driver_unregister(&mv_xor_shared_driver);
	}
	return rc;
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	platform_driver_unregister(&mv_xor_shared_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");