mv_xor.c

/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
        container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
        container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
        container_of(tx, struct mv_xor_desc_slot, async_tx)
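
/*
 * Note: these helpers rely on the generic dmaengine objects being
 * embedded in the driver-private structures (struct mv_xor_chan,
 * struct mv_xor_device, struct mv_xor_desc_slot), so container_of()
 * can map a core pointer back to its enclosing wrapper.
 */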

static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;

        hw_desc->status = (1 << 31);
        hw_desc->phy_next_desc = 0;
        hw_desc->desc_command = (1 << 31);
}
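
/*
 * The two (1 << 31) writes above are magic constants in this version;
 * later revisions of this driver name them XOR_DESC_DMA_OWNED (the
 * descriptor belongs to the hardware) and XOR_DESC_EOD_INT_EN (raise
 * an end-of-descriptor interrupt), which appears to be the intent here.
 */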

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
                                int src_idx)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        return hw_desc->phy_src_addr[src_idx];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
                                   u32 byte_count)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
                                  u32 next_desc_addr)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        BUG_ON(hw_desc->phy_next_desc);
        hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
        desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
                                  dma_addr_t addr)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
        return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
                                 int index, dma_addr_t addr)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        hw_desc->phy_src_addr[index] = addr;
        if (desc->type == DMA_XOR)
                hw_desc->desc_command |= (1 << index);
}
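
/*
 * For XOR descriptors the low bits of desc_command appear to act as
 * per-source enable bits: setting bit 'index' tells the engine to
 * include source block 'index' in the XOR computation. The bit names
 * are not spelled out in this version of the driver.
 */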

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
        return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
                                        u32 next_desc_addr)
{
        __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
        __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
        __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
        __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
        __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
        u32 val = __raw_readl(XOR_INTR_MASK(chan));
        val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
        __raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
        u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
        intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
        return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
        if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
                return 1;

        return 0;
}
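
/*
 * Bits 0-3 of the per-channel cause field are completion/stop events;
 * bits 4-9 are the error conditions tested above (later versions of
 * this driver name them: 4 address decode, 5 read protect, 6 write
 * protect, 7 ownership, 8 parity, 9 crossbar/mbus error).
 */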

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
        u32 val = ~(1 << (chan->idx * 16));
        dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
        __raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
        u32 val = 0xFFFF0000 >> (chan->idx * 16);
        __raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
        struct mv_xor_desc_slot *chain_old_tail = list_entry(
                desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

        if (chain_old_tail->type != desc->type)
                return 0;
        if (desc->type == DMA_MEMSET)
                return 0;

        return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
                        enum dma_transaction_type type)
{
        u32 op_mode;
        u32 config = __raw_readl(XOR_CONFIG(chan));

        switch (type) {
        case DMA_XOR:
                op_mode = XOR_OPERATION_MODE_XOR;
                break;
        case DMA_MEMCPY:
                op_mode = XOR_OPERATION_MODE_MEMCPY;
                break;
        case DMA_MEMSET:
                op_mode = XOR_OPERATION_MODE_MEMSET;
                break;
        default:
                dev_err(chan->device->common.dev,
                        "error: unsupported operation %d.\n",
                        type);
                BUG();
                return;
        }

        config &= ~0x7;
        config |= op_mode;
        __raw_writel(config, XOR_CONFIG(chan));
        chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
        u32 activation;

        dev_dbg(chan->device->common.dev, " activate chan.\n");
        activation = __raw_readl(XOR_ACTIVATION(chan));
        activation |= 0x1;
        __raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
        u32 state = __raw_readl(XOR_ACTIVATION(chan));

        state = (state >> 4) & 0x3;

        return (state == 1) ? 1 : 0;
}
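
/*
 * The two-bit field at bits 5:4 of the activation register appears to
 * encode the channel state; a value of 1 seems to mean "actively
 * executing a chain", which is all this driver tests for.
 */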

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
        return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
                              struct mv_xor_desc_slot *slot)
{
        dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
                __func__, __LINE__, slot);

        slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
                                   struct mv_xor_desc_slot *sw_desc)
{
        dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
                __func__, __LINE__, sw_desc);

        if (sw_desc->type != mv_chan->current_type)
                mv_set_mode(mv_chan, sw_desc->type);

        if (sw_desc->type == DMA_MEMSET) {
                /* for memset requests we need to program the engine, no
                 * descriptors used.
                 */
                struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
                mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
                mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
                mv_chan_set_value(mv_chan, sw_desc->value);
        } else {
                /* set the hardware chain */
                mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
        }

        mv_chan->pending += sw_desc->slot_cnt;
        mv_xor_issue_pending(&mv_chan->common);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
                               struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
        BUG_ON(desc->async_tx.cookie < 0);

        if (desc->async_tx.cookie > 0) {
                cookie = desc->async_tx.cookie;

                /* call the callback (must not sleep or submit new
                 * operations to this channel)
                 */
                if (desc->async_tx.callback)
                        desc->async_tx.callback(
                                desc->async_tx.callback_param);

                /* unmap dma addresses
                 * (unmap_single vs unmap_page?)
                 */
                if (desc->group_head && desc->unmap_len) {
                        struct mv_xor_desc_slot *unmap = desc->group_head;
                        struct device *dev =
                                &mv_chan->device->pdev->dev;
                        u32 len = unmap->unmap_len;
                        enum dma_ctrl_flags flags = desc->async_tx.flags;
                        u32 src_cnt;
                        dma_addr_t addr;
                        dma_addr_t dest;

                        src_cnt = unmap->unmap_src_cnt;
                        dest = mv_desc_get_dest_addr(unmap);
                        if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                                enum dma_data_direction dir;

                                if (src_cnt > 1) /* is xor ? */
                                        dir = DMA_BIDIRECTIONAL;
                                else
                                        dir = DMA_FROM_DEVICE;
                                dma_unmap_page(dev, dest, len, dir);
                        }

                        if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                                while (src_cnt--) {
                                        addr = mv_desc_get_src_addr(unmap,
                                                                    src_cnt);
                                        if (addr == dest)
                                                continue;
                                        dma_unmap_page(dev, addr, len,
                                                       DMA_TO_DEVICE);
                                }
                        }
                        desc->group_head = NULL;
                }
        }

        /* run dependent operations */
        dma_run_dependencies(&desc->async_tx);

        return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
        struct mv_xor_desc_slot *iter, *_iter;

        dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
        list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
                                 completed_node) {

                if (async_tx_test_ack(&iter->async_tx)) {
                        list_del(&iter->completed_node);
                        mv_xor_free_slots(mv_chan, iter);
                }
        }
        return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
                  struct mv_xor_chan *mv_chan)
{
        dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
                __func__, __LINE__, desc, desc->async_tx.flags);
        list_del(&desc->chain_node);

        /* the client is allowed to attach dependent operations
         * until 'ack' is set
         */
        if (!async_tx_test_ack(&desc->async_tx)) {
                /* move this slot to the completed_slots */
                list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
                return 0;
        }

        mv_xor_free_slots(mv_chan, desc);
        return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
        struct mv_xor_desc_slot *iter, *_iter;
        dma_cookie_t cookie = 0;
        int busy = mv_chan_is_busy(mv_chan);
        u32 current_desc = mv_chan_get_current_desc(mv_chan);
        int seen_current = 0;

        dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
        dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
        mv_xor_clean_completed_slots(mv_chan);

        /* free completed slots from the chain starting with
         * the oldest descriptor
         */
        list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
                                 chain_node) {
                prefetch(_iter);
                prefetch(&_iter->async_tx);

                /* do not advance past the current descriptor loaded into the
                 * hardware channel, subsequent descriptors are either in
                 * process or have not been submitted
                 */
                if (seen_current)
                        break;

                /* stop the search if we reach the current descriptor and the
                 * channel is busy
                 */
                if (iter->async_tx.phys == current_desc) {
                        seen_current = 1;
                        if (busy)
                                break;
                }

                cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

                if (mv_xor_clean_slot(iter, mv_chan))
                        break;
        }

        if ((busy == 0) && !list_empty(&mv_chan->chain)) {
                struct mv_xor_desc_slot *chain_head;
                chain_head = list_entry(mv_chan->chain.next,
                                        struct mv_xor_desc_slot,
                                        chain_node);

                mv_xor_start_new_chain(mv_chan, chain_head);
        }

        if (cookie > 0)
                mv_chan->common.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
        spin_lock_bh(&mv_chan->lock);
        __mv_xor_slot_cleanup(mv_chan);
        spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
        struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
        mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
                   int slots_per_op)
{
        struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
        LIST_HEAD(chain);
        int slots_found, retry = 0;

        /* start search from the last allocated descriptor;
         * if a contiguous allocation cannot be found, start searching
         * from the beginning of the list
         */
retry:
        slots_found = 0;
        if (retry == 0)
                iter = mv_chan->last_used;
        else
                iter = list_entry(&mv_chan->all_slots,
                                  struct mv_xor_desc_slot,
                                  slot_node);

        list_for_each_entry_safe_continue(
                iter, _iter, &mv_chan->all_slots, slot_node) {
                prefetch(_iter);
                prefetch(&_iter->async_tx);
                if (iter->slots_per_op) {
                        /* give up after finding the first busy slot
                         * on the second pass through the list
                         */
                        if (retry)
                                break;

                        slots_found = 0;
                        continue;
                }

                /* start the allocation if the slot is correctly aligned */
                if (!slots_found++)
                        alloc_start = iter;

                if (slots_found == num_slots) {
                        struct mv_xor_desc_slot *alloc_tail = NULL;
                        struct mv_xor_desc_slot *last_used = NULL;
                        iter = alloc_start;
                        while (num_slots) {
                                int i;

                                /* pre-ack all but the last descriptor */
                                async_tx_ack(&iter->async_tx);

                                list_add_tail(&iter->chain_node, &chain);
                                alloc_tail = iter;
                                iter->async_tx.cookie = 0;
                                iter->slot_cnt = num_slots;
                                iter->xor_check_result = NULL;
                                for (i = 0; i < slots_per_op; i++) {
                                        iter->slots_per_op = slots_per_op - i;
                                        last_used = iter;
                                        iter = list_entry(iter->slot_node.next,
                                                struct mv_xor_desc_slot,
                                                slot_node);
                                }
                                num_slots -= slots_per_op;
                        }
                        alloc_tail->group_head = alloc_start;
                        alloc_tail->async_tx.cookie = -EBUSY;
                        list_splice(&chain, &alloc_tail->tx_list);
                        mv_chan->last_used = last_used;
                        mv_desc_clear_next_desc(alloc_start);
                        mv_desc_clear_next_desc(alloc_tail);
                        return alloc_tail;
                }
        }
        if (!retry++)
                goto retry;

        /* try to free some slots if the allocation fails */
        tasklet_schedule(&mv_chan->irq_tasklet);

        return NULL;
}

/************************ DMA engine API functions ****************************/

static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
        struct mv_xor_desc_slot *grp_start, *old_chain_tail;
        dma_cookie_t cookie;
        int new_hw_chain = 1;

        dev_dbg(mv_chan->device->common.dev,
                "%s sw_desc %p: async_tx %p\n",
                __func__, sw_desc, &sw_desc->async_tx);

        grp_start = sw_desc->group_head;

        spin_lock_bh(&mv_chan->lock);
        cookie = dma_cookie_assign(tx);

        if (list_empty(&mv_chan->chain))
                list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
        else {
                new_hw_chain = 0;

                old_chain_tail = list_entry(mv_chan->chain.prev,
                                            struct mv_xor_desc_slot,
                                            chain_node);
                list_splice_init(&grp_start->tx_list,
                                 &old_chain_tail->chain_node);

                if (!mv_can_chain(grp_start))
                        goto submit_done;

                dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
                        old_chain_tail->async_tx.phys);

                /* fix up the hardware chain */
                mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

                /* if the channel is not busy */
                if (!mv_chan_is_busy(mv_chan)) {
                        u32 current_desc = mv_chan_get_current_desc(mv_chan);
                        /*
                         * and the current desc is the end of the chain before
                         * the append, then we need to start the channel
                         */
                        if (current_desc == old_chain_tail->async_tx.phys)
                                new_hw_chain = 1;
                }
        }

        if (new_hw_chain)
                mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
        spin_unlock_bh(&mv_chan->lock);
        return cookie;
}
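
/*
 * For reference, a client never calls mv_xor_tx_submit() directly; it
 * arrives here through the generic dmaengine API. A minimal sketch
 * (error handling omitted; 'dest', 'src' and 'len' are the caller's
 * already-mapped DMA addresses, and dmaengine_get() has been called):
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, 0);
 *	cookie = tx->tx_submit(tx);		// lands in this function
 *	dma_async_issue_pending(chan);
 *	// later: dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 */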

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
        char *hw_desc;
        int idx;
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *slot = NULL;
        int num_descs_in_pool = mv_chan->device->pool_size/MV_XOR_SLOT_SIZE;

        /* Allocate descriptor slots */
        idx = mv_chan->slots_allocated;
        while (idx < num_descs_in_pool) {
                slot = kzalloc(sizeof(*slot), GFP_KERNEL);
                if (!slot) {
                        printk(KERN_INFO "MV XOR Channel only initialized"
                                " %d descriptor slots", idx);
                        break;
                }
                hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
                slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

                dma_async_tx_descriptor_init(&slot->async_tx, chan);
                slot->async_tx.tx_submit = mv_xor_tx_submit;
                INIT_LIST_HEAD(&slot->chain_node);
                INIT_LIST_HEAD(&slot->slot_node);
                INIT_LIST_HEAD(&slot->tx_list);
                hw_desc = (char *) mv_chan->device->dma_desc_pool;
                slot->async_tx.phys =
                        (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
                slot->idx = idx++;

                spin_lock_bh(&mv_chan->lock);
                mv_chan->slots_allocated = idx;
                list_add_tail(&slot->slot_node, &mv_chan->all_slots);
                spin_unlock_bh(&mv_chan->lock);
        }

        if (mv_chan->slots_allocated && !mv_chan->last_used)
                mv_chan->last_used = list_entry(mv_chan->all_slots.next,
                                                struct mv_xor_desc_slot,
                                                slot_node);

        dev_dbg(mv_chan->device->common.dev,
                "allocated %d descriptor slots last_used: %p\n",
                mv_chan->slots_allocated, mv_chan->last_used);

        return mv_chan->slots_allocated ? : -ENOMEM;
}
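
/*
 * Each software slot owns one MV_XOR_SLOT_SIZE chunk of the coherent
 * pool: ->hw_desc gets the CPU view and ->async_tx.phys the DMA view
 * of the same idx * MV_XOR_SLOT_SIZE offset, so no address translation
 * is needed later when chaining descriptors.
 */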

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                       size_t len, unsigned long flags)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *sw_desc, *grp_start;
        int slot_cnt;

        dev_dbg(mv_chan->device->common.dev,
                "%s dest: %x src %x len: %u flags: %ld\n",
                __func__, dest, src, len, flags);
        if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
                return NULL;

        BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

        spin_lock_bh(&mv_chan->lock);
        slot_cnt = mv_chan_memcpy_slot_count(len);
        sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
        if (sw_desc) {
                sw_desc->type = DMA_MEMCPY;
                sw_desc->async_tx.flags = flags;
                grp_start = sw_desc->group_head;
                mv_desc_init(grp_start, flags);
                mv_desc_set_byte_count(grp_start, len);
                mv_desc_set_dest_addr(sw_desc->group_head, dest);
                mv_desc_set_src_addr(grp_start, 0, src);
                sw_desc->unmap_src_cnt = 1;
                sw_desc->unmap_len = len;
        }
        spin_unlock_bh(&mv_chan->lock);

        dev_dbg(mv_chan->device->common.dev,
                "%s sw_desc %p async_tx %p\n",
                __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

        return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
                       size_t len, unsigned long flags)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *sw_desc, *grp_start;
        int slot_cnt;

        dev_dbg(mv_chan->device->common.dev,
                "%s dest: %x len: %u flags: %ld\n",
                __func__, dest, len, flags);
        if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
                return NULL;

        BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

        spin_lock_bh(&mv_chan->lock);
        slot_cnt = mv_chan_memset_slot_count(len);
        sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
        if (sw_desc) {
                sw_desc->type = DMA_MEMSET;
                sw_desc->async_tx.flags = flags;
                grp_start = sw_desc->group_head;
                mv_desc_init(grp_start, flags);
                mv_desc_set_byte_count(grp_start, len);
                mv_desc_set_dest_addr(sw_desc->group_head, dest);
                mv_desc_set_block_fill_val(grp_start, value);
                sw_desc->unmap_src_cnt = 1;
                sw_desc->unmap_len = len;
        }
        spin_unlock_bh(&mv_chan->lock);

        /* guard the dereference: sw_desc may be NULL if allocation failed */
        dev_dbg(mv_chan->device->common.dev,
                "%s sw_desc %p async_tx %p\n",
                __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

        return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                    unsigned int src_cnt, size_t len, unsigned long flags)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *sw_desc, *grp_start;
        int slot_cnt;

        if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
                return NULL;

        BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

        dev_dbg(mv_chan->device->common.dev,
                "%s src_cnt: %d len: %u dest %x flags: %ld\n",
                __func__, src_cnt, len, dest, flags);

        spin_lock_bh(&mv_chan->lock);
        slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
        sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
        if (sw_desc) {
                sw_desc->type = DMA_XOR;
                sw_desc->async_tx.flags = flags;
                grp_start = sw_desc->group_head;
                mv_desc_init(grp_start, flags);
                /* the byte count field is the same as in memcpy desc */
                mv_desc_set_byte_count(grp_start, len);
                mv_desc_set_dest_addr(sw_desc->group_head, dest);
                sw_desc->unmap_src_cnt = src_cnt;
                sw_desc->unmap_len = len;
                while (src_cnt--)
                        mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
        }
        spin_unlock_bh(&mv_chan->lock);

        dev_dbg(mv_chan->device->common.dev,
                "%s sw_desc %p async_tx %p\n",
                __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
        return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *iter, *_iter;
        int in_use_descs = 0;

        mv_xor_slot_cleanup(mv_chan);

        spin_lock_bh(&mv_chan->lock);
        list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
                                 chain_node) {
                in_use_descs++;
                list_del(&iter->chain_node);
        }
        list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
                                 completed_node) {
                in_use_descs++;
                list_del(&iter->completed_node);
        }
        list_for_each_entry_safe_reverse(
                iter, _iter, &mv_chan->all_slots, slot_node) {
                list_del(&iter->slot_node);
                kfree(iter);
                mv_chan->slots_allocated--;
        }
        mv_chan->last_used = NULL;

        dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
                __func__, mv_chan->slots_allocated);
        spin_unlock_bh(&mv_chan->lock);

        if (in_use_descs)
                dev_err(mv_chan->device->common.dev,
                        "freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *txstate)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_SUCCESS) {
                mv_xor_clean_completed_slots(mv_chan);
                return ret;
        }
        mv_xor_slot_cleanup(mv_chan);

        return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
        u32 val;

        val = __raw_readl(XOR_CONFIG(chan));
        dev_err(chan->device->common.dev,
                "config 0x%08x.\n", val);

        val = __raw_readl(XOR_ACTIVATION(chan));
        dev_err(chan->device->common.dev,
                "activation 0x%08x.\n", val);

        val = __raw_readl(XOR_INTR_CAUSE(chan));
        dev_err(chan->device->common.dev,
                "intr cause 0x%08x.\n", val);

        val = __raw_readl(XOR_INTR_MASK(chan));
        dev_err(chan->device->common.dev,
                "intr mask 0x%08x.\n", val);

        val = __raw_readl(XOR_ERROR_CAUSE(chan));
        dev_err(chan->device->common.dev,
                "error cause 0x%08x.\n", val);

        val = __raw_readl(XOR_ERROR_ADDR(chan));
        dev_err(chan->device->common.dev,
                "error addr 0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
                                         u32 intr_cause)
{
        if (intr_cause & (1 << 4)) {
                dev_dbg(chan->device->common.dev,
                        "ignore this error\n");
                return;
        }

        dev_err(chan->device->common.dev,
                "error on chan %d. intr cause 0x%08x.\n",
                chan->idx, intr_cause);

        mv_dump_xor_regs(chan);
        BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
        struct mv_xor_chan *chan = data;
        u32 intr_cause = mv_chan_get_intr_cause(chan);

        dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);

        if (mv_is_err_intr(intr_cause))
                mv_xor_err_interrupt_handler(chan, intr_cause);

        tasklet_schedule(&chan->irq_tasklet);

        mv_xor_device_clear_eoc_cause(chan);

        return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

        if (mv_chan->pending >= MV_XOR_THRESHOLD) {
                mv_chan->pending = 0;
                mv_chan_activate(mv_chan);
        }
}
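
/*
 * MV_XOR_THRESHOLD comes from mv_xor.h (it is 1 in this era of the
 * driver, as far as we can tell), so in practice every
 * ->issue_pending() call with queued work activates the channel; the
 * counter merely leaves room for batching several descriptors per
 * doorbell.
 */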

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
        int i;
        void *src, *dest;
        dma_addr_t src_dma, dest_dma;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        struct dma_async_tx_descriptor *tx;
        int err = 0;
        struct mv_xor_chan *mv_chan;

        src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;

        dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < MV_XOR_TEST_SIZE; i++)
                ((u8 *) src)[i] = (u8)i;

        /* Start copy, using first DMA channel */
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        dest_dma = dma_map_single(dma_chan->device->dev, dest,
                                  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

        src_dma = dma_map_single(dma_chan->device->dev, src,
                                 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

        tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
                                    MV_XOR_TEST_SIZE, 0);
        cookie = mv_xor_tx_submit(tx);
        mv_xor_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(1);

        if (mv_xor_status(dma_chan, cookie, NULL) !=
            DMA_SUCCESS) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        mv_chan = to_mv_xor_chan(dma_chan);
        dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
                                MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
        if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        mv_xor_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
        int i, src_idx;
        struct page *dest;
        struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
        dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
        dma_addr_t dest_dma;
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u8 cmp_byte = 0;
        u32 cmp_word;
        int err = 0;
        struct mv_xor_chan *mv_chan;

        for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
                if (!xor_srcs[src_idx]) {
                        while (src_idx--)
                                __free_page(xor_srcs[src_idx]);
                        return -ENOMEM;
                }
        }

        dest = alloc_page(GFP_KERNEL);
        if (!dest) {
                while (src_idx--)
                        __free_page(xor_srcs[src_idx]);
                return -ENOMEM;
        }

        /* Fill in src buffers */
        for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
                u8 *ptr = page_address(xor_srcs[src_idx]);
                for (i = 0; i < PAGE_SIZE; i++)
                        ptr[i] = (1 << src_idx);
        }

        for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
                cmp_byte ^= (u8) (1 << src_idx);

        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
                (cmp_byte << 8) | cmp_byte;

        memset(page_address(dest), 0, PAGE_SIZE);

        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        /* test xor */
        dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
                                DMA_FROM_DEVICE);

        for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
                dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
                                           0, PAGE_SIZE, DMA_TO_DEVICE);

        tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

        cookie = mv_xor_tx_submit(tx);
        mv_xor_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(8);

        if (mv_xor_status(dma_chan, cookie, NULL) !=
            DMA_SUCCESS) {
                dev_err(dma_chan->device->dev,
                        "Self-test xor timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        mv_chan = to_mv_xor_chan(dma_chan);
        dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
                                PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
                u32 *ptr = page_address(dest);
                if (ptr[i] != cmp_word) {
                        dev_err(dma_chan->device->dev,
                                "Self-test xor failed compare, disabling."
                                " index %d, data %x, expected %x\n", i,
                                ptr[i], cmp_word);
                        err = -ENODEV;
                        goto free_resources;
                }
        }

free_resources:
        mv_xor_free_chan_resources(dma_chan);
out:
        src_idx = MV_XOR_NUM_SRC_TEST;
        while (src_idx--)
                __free_page(xor_srcs[src_idx]);
        __free_page(dest);
        return err;
}

static int mv_xor_channel_remove(struct mv_xor_device *device)
{
        struct dma_chan *chan, *_chan;
        struct mv_xor_chan *mv_chan;

        dma_async_device_unregister(&device->common);

        dma_free_coherent(&device->pdev->dev, device->pool_size,
                          device->dma_desc_pool_virt, device->dma_desc_pool);

        list_for_each_entry_safe(chan, _chan, &device->common.channels,
                                 device_node) {
                mv_chan = to_mv_xor_chan(chan);
                list_del(&chan->device_node);
        }

        return 0;
}

static struct mv_xor_device *
mv_xor_channel_add(struct mv_xor_shared_private *msp,
                   struct platform_device *pdev,
                   int hw_id, dma_cap_mask_t cap_mask,
                   size_t pool_size, int irq)
{
        int ret = 0;
        struct mv_xor_device *adev;
        struct mv_xor_chan *mv_chan;
        struct dma_device *dma_dev;

        adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
        if (!adev)
                return ERR_PTR(-ENOMEM);

        dma_dev = &adev->common;

        /* allocate coherent memory for hardware descriptors
         * note: writecombine gives slightly better performance, but
         * requires that we explicitly flush the writes
         */
        adev->pool_size = pool_size;
        adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
                                                          adev->pool_size,
                                                          &adev->dma_desc_pool,
                                                          GFP_KERNEL);
        if (!adev->dma_desc_pool_virt)
                return ERR_PTR(-ENOMEM);

        adev->id = hw_id;

        /* discover transaction capabilities from the platform data */
        dma_dev->cap_mask = cap_mask;
        adev->pdev = pdev;
        adev->shared = msp;

        INIT_LIST_HEAD(&dma_dev->channels);

        /* set base routines */
        dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
        dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
        dma_dev->device_tx_status = mv_xor_status;
        dma_dev->device_issue_pending = mv_xor_issue_pending;
        dma_dev->dev = &pdev->dev;

        /* set prep routines based on capability */
        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
                dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
        if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
                dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                dma_dev->max_xor = 8;
                dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
        }

        mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
        if (!mv_chan) {
                ret = -ENOMEM;
                goto err_free_dma;
        }
        mv_chan->device = adev;
        mv_chan->idx = hw_id;
        mv_chan->mmr_base = adev->shared->xor_base;

        if (!mv_chan->mmr_base) {
                ret = -ENOMEM;
                goto err_free_dma;
        }
        tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
                     mv_chan);

        /* clear errors before enabling interrupts */
        mv_xor_device_clear_err_status(mv_chan);

        ret = devm_request_irq(&pdev->dev, irq,
                               mv_xor_interrupt_handler,
                               0, dev_name(&pdev->dev), mv_chan);
        if (ret)
                goto err_free_dma;

        mv_chan_unmask_interrupts(mv_chan);

        mv_set_mode(mv_chan, DMA_MEMCPY);

        spin_lock_init(&mv_chan->lock);
        INIT_LIST_HEAD(&mv_chan->chain);
        INIT_LIST_HEAD(&mv_chan->completed_slots);
        INIT_LIST_HEAD(&mv_chan->all_slots);
        mv_chan->common.device = dma_dev;
        dma_cookie_init(&mv_chan->common);

        list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
                ret = mv_xor_memcpy_self_test(adev);
                dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
                if (ret)
                        goto err_free_dma;
        }

        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                ret = mv_xor_xor_self_test(adev);
                dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
                if (ret)
                        goto err_free_dma;
        }

        dev_info(&pdev->dev, "Marvell XOR: "
                 "( %s%s%s%s)\n",
                 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
                 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
                 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
                 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

        dma_async_device_register(dma_dev);
        return adev;

err_free_dma:
        dma_free_coherent(&adev->pdev->dev, pool_size,
                          adev->dma_desc_pool_virt, adev->dma_desc_pool);
        return ERR_PTR(ret);
}

static int __devexit mv_xor_remove(struct platform_device *pdev)
{
        struct mv_xor_device *device = platform_get_drvdata(pdev);
        return mv_xor_channel_remove(device);
}

static int __devinit mv_xor_probe(struct platform_device *pdev)
{
        struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
        struct mv_xor_shared_private *msp =
                platform_get_drvdata(plat_data->shared);
        struct mv_xor_device *mv_xor_device;
        int irq;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        mv_xor_device = mv_xor_channel_add(msp, pdev, plat_data->hw_id,
                                           plat_data->cap_mask,
                                           plat_data->pool_size, irq);
        if (IS_ERR(mv_xor_device))
                return PTR_ERR(mv_xor_device);

        platform_set_drvdata(pdev, mv_xor_device);
        return 0;
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
                         const struct mbus_dram_target_info *dram)
{
        void __iomem *base = msp->xor_base;
        u32 win_enable = 0;
        int i;

        for (i = 0; i < 8; i++) {
                writel(0, base + WINDOW_BASE(i));
                writel(0, base + WINDOW_SIZE(i));
                if (i < 4)
                        writel(0, base + WINDOW_REMAP_HIGH(i));
        }

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;

                writel((cs->base & 0xffff0000) |
                       (cs->mbus_attr << 8) |
                       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
                writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

                win_enable |= (1 << i);
                win_enable |= 3 << (16 + (2 * i));
        }

        writel(win_enable, base + WINDOW_BAR_ENABLE(0));
        writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}
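
/*
 * Window programming above: all eight address windows are first
 * disabled, then one window is opened per DRAM chip-select. In the
 * enable register, bit i turns window i on and the two-bit field at
 * 16 + 2*i appears to be its access-protect setting (3 = read and
 * write allowed). Both XOR units (BAR_ENABLE(0) and (1)) get the same
 * mapping.
 */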

static struct platform_driver mv_xor_driver = {
        .probe          = mv_xor_probe,
        .remove         = __devexit_p(mv_xor_remove),
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = MV_XOR_NAME,
        },
};

static int mv_xor_shared_probe(struct platform_device *pdev)
{
        const struct mbus_dram_target_info *dram;
        struct mv_xor_shared_private *msp;
        struct mv_xor_shared_platform_data *pdata = pdev->dev.platform_data;
        struct resource *res;
        int i, ret;

        dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

        msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
        if (!msp)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        msp->xor_base = devm_ioremap(&pdev->dev, res->start,
                                     resource_size(res));
        if (!msp->xor_base)
                return -EBUSY;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res)
                return -ENODEV;

        msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
                                          resource_size(res));
        if (!msp->xor_high_base)
                return -EBUSY;

        platform_set_drvdata(pdev, msp);

        /*
         * (Re-)program MBUS remapping windows if we are asked to.
         */
        dram = mv_mbus_dram_info();
        if (dram)
                mv_xor_conf_mbus_windows(msp, dram);

        /* Not all platforms can gate the clock, so it is not
         * an error if the clock does not exist.
         */
        msp->clk = clk_get(&pdev->dev, NULL);
        if (!IS_ERR(msp->clk))
                clk_prepare_enable(msp->clk);

        if (pdata && pdata->channels) {
                for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
                        struct mv_xor_platform_data *cd;
                        int irq;

                        cd = &pdata->channels[i];
                        if (!cd) {
                                ret = -ENODEV;
                                goto err_channel_add;
                        }

                        irq = platform_get_irq(pdev, i);
                        if (irq < 0) {
                                ret = irq;
                                goto err_channel_add;
                        }

                        msp->channels[i] =
                                mv_xor_channel_add(msp, pdev, cd->hw_id,
                                                   cd->cap_mask,
                                                   cd->pool_size, irq);
                        if (IS_ERR(msp->channels[i])) {
                                ret = PTR_ERR(msp->channels[i]);
                                goto err_channel_add;
                        }
                }
        }

        return 0;

err_channel_add:
        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
                if (msp->channels[i])
                        mv_xor_channel_remove(msp->channels[i]);

        /* only touch the clock if we actually obtained one */
        if (!IS_ERR(msp->clk)) {
                clk_disable_unprepare(msp->clk);
                clk_put(msp->clk);
        }

        return ret;
}

static int mv_xor_shared_remove(struct platform_device *pdev)
{
        struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
                if (msp->channels[i])
                        mv_xor_channel_remove(msp->channels[i]);
        }

        if (!IS_ERR(msp->clk)) {
                clk_disable_unprepare(msp->clk);
                clk_put(msp->clk);
        }

        return 0;
}

static struct platform_driver mv_xor_shared_driver = {
        .probe          = mv_xor_shared_probe,
        .remove         = mv_xor_shared_remove,
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = MV_XOR_SHARED_NAME,
        },
};

static int __init mv_xor_init(void)
{
        int rc;

        rc = platform_driver_register(&mv_xor_shared_driver);
        if (!rc) {
                rc = platform_driver_register(&mv_xor_driver);
                if (rc)
                        platform_driver_unregister(&mv_xor_shared_driver);
        }
        return rc;
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
        platform_driver_unregister(&mv_xor_driver);
        platform_driver_unregister(&mv_xor_shared_driver);
        return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");