mv_xor.c
/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <plat/mv_xor.h>
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
	container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)
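
/*
 * mv_desc_init - prepare a hardware descriptor for (re)use.  Bit 31
 * of the status word hands ownership of the descriptor to the XOR
 * engine, and bit 31 of the command word requests an end-of-descriptor
 * interrupt (later kernels name these bits XOR_DESC_DMA_OWNED and
 * XOR_DESC_EOD_INT_EN in mv_xor.h).
 */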
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}
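
/*
 * Per-channel register helpers.  The XOR_*() accessor macros from
 * mv_xor.h resolve to offsets from chan->mmr_base, and the registers
 * are accessed with the non-swapping __raw_readl()/__raw_writel()
 * variants.
 */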
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}
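
/*
 * The interrupt cause and mask registers are shared between the two
 * channels of an engine; each channel owns the 16-bit field starting
 * at bit (chan->idx * 16).  Within a field, bits 4..9 report error
 * conditions, which mv_is_err_intr() tests for below.
 */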
static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = (1 << (1 + (chan->idx * 16)));
	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}
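
/*
 * Descriptors may only be appended to the live hardware chain when
 * they perform the same operation type as the current tail; memset
 * never chains, because the engine is programmed directly for memset
 * rather than through a descriptor (see mv_xor_start_new_chain()).
 */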
static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_printk(KERN_ERR, chan->device->common.dev,
			   "error: unsupported operation %d.\n",
			   type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(chan->device->common.dev, " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}
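
/*
 * mv_chan_is_busy - read the channel state from the activation
 * register: bits 5:4 hold the current status, and a value of 1 means
 * the channel is actively processing a chain.
 */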
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}

	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&mv_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	async_tx_run_dependencies(&desc->async_tx);

	return cookie;
}
static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	__mv_xor_slot_cleanup(chan);
}
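
/*
 * mv_xor_alloc_slots - grab a contiguous run of descriptor slots.
 * With this engine a transaction always fits in a single slot (the
 * *_slot_count() helpers above return 1), so num_slots and
 * slots_per_op are both 1 in practice; the loop below keeps the more
 * general multi-slot bookkeeping anyway.
 */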
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
							  struct mv_xor_desc_slot,
							  slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->async_tx.tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

static dma_cookie_t
mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
		      struct mv_xor_desc_slot *desc)
{
	dma_cookie_t cookie = mv_chan->common.cookie;

	if (++cookie < 0)
		cookie = 1;
	mv_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}

/************************ DMA engine API functions ****************************/
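
/*
 * mv_xor_tx_submit - hook the software descriptor onto the channel's
 * chain and, when possible, splice it onto the live hardware chain.
 * A client reaches this through the generic dmaengine API; a minimal
 * sketch (names from dmaengine.h of this era, not from this file):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, 0);
 *	tx->callback = my_callback;	// hypothetical client callback
 *	cookie = tx->tx_submit(tx);	// lands here
 *	chan->device->device_issue_pending(chan);
 */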
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = mv_desc_assign_cookie(mv_chan, sw_desc);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->async_tx.tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);
	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
				       struct dma_client *client)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	struct mv_xor_platform_data *plat_data =
		mv_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->async_tx.tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}
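
/*
 * The three prep routines below share a pattern: validate the length
 * against MV_XOR_MIN_BYTE_COUNT/MV_XOR_MAX_BYTE_COUNT, grab a slot
 * under the channel lock, then fill in the hardware descriptor via
 * the mv_desc_*() helpers.  The returned async_tx descriptor is not
 * live until the client calls tx_submit() on it.
 */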
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	dev_dbg(mv_chan->device->common.dev,
		"%s src_cnt: %d len: %u dest: %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc*/
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan->device->common.dev,
			"freeing %d in use descriptors!\n", in_use_descs);
}
/**
 * mv_xor_is_complete - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 */
static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  dma_cookie_t *done,
					  dma_cookie_t *used)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;
	mv_chan->is_complete_cookie = cookie;
	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error addr 0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(chan->device->common.dev,
			"ignore this error\n");
		return;
	}

	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error on chan %d. intr cause 0x%08x.\n",
		   chan->idx, intr_cause);

	mv_dump_xor_regs(chan);

	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}
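
/*
 * mv_xor_issue_pending - kick the hardware.  Activation is batched:
 * the channel is only (re)activated once at least MV_XOR_THRESHOLD
 * descriptors have been queued, amortising the activation register
 * write over several transactions.
 */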
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				   "Self-test xor failed compare, disabling."
				   " index %d, data %x, expected %x\n", i,
				   ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
static int __devexit mv_xor_remove(struct platform_device *dev)
{
	struct mv_xor_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct mv_xor_chan *mv_chan;
	struct mv_xor_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			  device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		mv_chan = to_mv_xor_chan(chan);
		list_del(&chan->device_node);
	}

	return 0;
}

static int __devinit mv_xor_probe(struct platform_device *pdev)
{
	int ret = 0;
	int irq;
	struct mv_xor_device *adev;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;
	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  plat_data->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt)
		return -ENOMEM;

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;
	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	adev->shared = platform_get_drvdata(plat_data->shared);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_is_tx_complete = mv_xor_is_complete;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	mv_chan->device = adev;
	mv_chan->idx = plat_data->hw_id;
	mv_chan->mmr_base = adev->shared->xor_base;

	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_dma;
	}
	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	INIT_RCU_HEAD(&mv_chan->common.rcu);
	mv_chan->common.device = dma_dev;

	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
		   "( %s%s%s%s)\n",
		   dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		   dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		   dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		   dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			  adev->dma_desc_pool_virt, adev->dma_desc_pool);
out:
	return ret;
}
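
/*
 * mv_xor_conf_mbus_windows - program the engine's address decoding
 * windows from the mbus_dram_target_info handed over by the platform
 * code.  All eight windows are cleared first; then one window is
 * opened per DRAM chip-select and enabled for both channels of the
 * engine via the two BAR-enable registers (the 3 << (16 + 2 * i)
 * bits appear to set the per-window access-control field to full
 * access).
 */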
static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
			 struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
	},
};
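
/*
 * The hardware is exposed as two kinds of platform device: an
 * MV_XOR_SHARED_NAME device (probed below) claims and maps the
 * register windows of an engine, and one MV_XOR_NAME device per
 * channel attaches to it through plat_data->shared (see
 * mv_xor_probe() above).
 */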
static int mv_xor_shared_probe(struct platform_device *pdev)
{
	struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
	struct mv_xor_shared_private *msp;
	struct resource *res;

	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     res->end - res->start + 1);
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  res->end - res->start + 1);
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (msd != NULL && msd->dram != NULL)
		mv_xor_conf_mbus_windows(msp, msd->dram);

	return 0;
}

static int mv_xor_shared_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver mv_xor_shared_driver = {
	.probe		= mv_xor_shared_probe,
	.remove		= mv_xor_shared_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_SHARED_NAME,
	},
};

static int __init mv_xor_init(void)
{
	int rc;

	rc = platform_driver_register(&mv_xor_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv_xor_driver);
		if (rc)
			platform_driver_unregister(&mv_xor_shared_driver);
	}
	return rc;
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	platform_driver_unregister(&mv_xor_shared_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");