iop3xx-adma.h
/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#ifndef _ADMA_H
#define _ADMA_H
#include <linux/types.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/hardware/iop_adma.h>

/* Memory copy units */
#define DMA_CCR(chan)		(chan->mmr_base + 0x0)
#define DMA_CSR(chan)		(chan->mmr_base + 0x4)
#define DMA_DAR(chan)		(chan->mmr_base + 0xc)
#define DMA_NDAR(chan)		(chan->mmr_base + 0x10)
#define DMA_PADR(chan)		(chan->mmr_base + 0x14)
#define DMA_PUADR(chan)		(chan->mmr_base + 0x18)
#define DMA_LADR(chan)		(chan->mmr_base + 0x1c)
#define DMA_BCR(chan)		(chan->mmr_base + 0x20)
#define DMA_DCR(chan)		(chan->mmr_base + 0x24)

/* Application accelerator unit */
#define AAU_ACR(chan)		(chan->mmr_base + 0x0)
#define AAU_ASR(chan)		(chan->mmr_base + 0x4)
#define AAU_ADAR(chan)		(chan->mmr_base + 0x8)
#define AAU_ANDAR(chan)		(chan->mmr_base + 0xc)
#define AAU_SAR(src, chan)	(chan->mmr_base + (0x10 + ((src) << 2)))
#define AAU_DAR(chan)		(chan->mmr_base + 0x20)
#define AAU_ABCR(chan)		(chan->mmr_base + 0x24)
#define AAU_ADCR(chan)		(chan->mmr_base + 0x28)
#define AAU_SAR_EDCR(src_edc)	(chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
#define AAU_EDCR0_IDX	8
#define AAU_EDCR1_IDX	17
#define AAU_EDCR2_IDX	26

#define DMA0_ID 0
#define DMA1_ID 1
#define AAU_ID 2
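
/*
 * Usage sketch (illustrative, not part of the original header): the macros
 * above simply add fixed offsets to a channel's memory-mapped register base.
 * For a DMA channel, reading the status register and chaining in the next
 * descriptor might look like:
 *
 *	u32 csr = __raw_readl(DMA_CSR(chan));		// mmr_base + 0x4
 *	__raw_writel(next_desc_addr, DMA_NDAR(chan));	// mmr_base + 0x10
 */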

struct iop3xx_aau_desc_ctrl {
	unsigned int int_en:1;
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int blk_ctrl:2;
	unsigned int dual_xor_en:1;
	unsigned int tx_complete:1;
	unsigned int zero_result_err:1;
	unsigned int zero_result_en:1;
	unsigned int dest_write_en:1;
};

struct iop3xx_aau_e_desc_ctrl {
	unsigned int reserved:1;
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int reserved2:7;
};

struct iop3xx_dma_desc_ctrl {
	unsigned int pci_transaction:4;
	unsigned int int_en:1;
	unsigned int dac_cycle_en:1;
	unsigned int mem_to_mem_en:1;
	unsigned int crc_data_tx_en:1;
	unsigned int crc_gen_en:1;
	unsigned int crc_seed_dis:1;
	unsigned int reserved:21;
	unsigned int crc_tx_complete:1;
};

struct iop3xx_desc_dma {
	u32 next_desc;
	union {
		u32 pci_src_addr;
		u32 pci_dest_addr;
		u32 src_addr;
	};
	union {
		u32 upper_pci_src_addr;
		u32 upper_pci_dest_addr;
	};
	union {
		u32 local_pci_src_addr;
		u32 local_pci_dest_addr;
		u32 dest_addr;
	};
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_dma_desc_ctrl desc_ctrl_field;
	};
	u32 crc_addr;
};

struct iop3xx_desc_aau {
	u32 next_desc;
	u32 src[4];
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;
		u32 e_desc_ctrl;
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
	} src_edc[31];
};

struct iop3xx_aau_gfmr {
	unsigned int gfmr1:8;
	unsigned int gfmr2:8;
	unsigned int gfmr3:8;
	unsigned int gfmr4:8;
};

struct iop3xx_desc_pq_xor {
	u32 next_desc;
	u32 src[3];
	union {
		u32 data_mult1;
		struct iop3xx_aau_gfmr data_mult1_field;
	};
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;
		u32 e_desc_ctrl;
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
		u32 data_multiplier;
		struct iop3xx_aau_gfmr data_mult_field;
		u32 reserved;
	} src_edc_gfmr[19];
};

struct iop3xx_desc_dual_xor {
	u32 next_desc;
	u32 src0_addr;
	u32 src1_addr;
	u32 h_src_addr;
	u32 d_src_addr;
	u32 h_dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	u32 d_dest_addr;
};

union iop3xx_desc {
	struct iop3xx_desc_aau *aau;
	struct iop3xx_desc_dma *dma;
	struct iop3xx_desc_pq_xor *pq_xor;
	struct iop3xx_desc_dual_xor *dual_xor;
	void *ptr;
};
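
/*
 * Illustrative sketch (not part of the original header): the helpers below
 * view a slot's raw hardware descriptor through this union and pick the
 * member that matches the owning channel, e.g.
 *
 *	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
 *	if (chan->device->id == AAU_ID)
 *		return hw_desc.aau->dest_addr;
 *	else
 *		return hw_desc.dma->dest_addr;
 */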

static inline int iop_adma_get_max_xor(void)
{
	return 32;
}

static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{
	int id = chan->device->id;

	switch (id) {
	case DMA0_ID:
	case DMA1_ID:
		return __raw_readl(DMA_DAR(chan));
	case AAU_ID:
		return __raw_readl(AAU_ADAR(chan));
	default:
		BUG();
	}
	return 0;
}

static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
						u32 next_desc_addr)
{
	int id = chan->device->id;

	switch (id) {
	case DMA0_ID:
	case DMA1_ID:
		__raw_writel(next_desc_addr, DMA_NDAR(chan));
		break;
	case AAU_ID:
		__raw_writel(next_desc_addr, AAU_ANDAR(chan));
		break;
	}
}

#define IOP_ADMA_STATUS_BUSY (1 << 10)
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)

static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
}

static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
					int num_slots)
{
	/* num_slots will only ever be 1, 2, 4, or 8 */
	return (desc->idx & (num_slots - 1)) ? 0 : 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
{
	*slots_per_op = 1;
	return 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
{
	*slots_per_op = 1;
	return 1;
}

static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
					int *slots_per_op)
{
	static const char slot_count_table[] = {
						1, 1, 1, 1, /* 01 - 04 */
						2, 2, 2, 2, /* 05 - 08 */
						4, 4, 4, 4, /* 09 - 12 */
						4, 4, 4, 4, /* 13 - 16 */
						8, 8, 8, 8, /* 17 - 20 */
						8, 8, 8, 8, /* 21 - 24 */
						8, 8, 8, 8, /* 25 - 28 */
						8, 8, 8, 8, /* 29 - 32 */
					      };
	*slots_per_op = slot_count_table[src_cnt - 1];
	return *slots_per_op;
}
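
/*
 * Worked example (illustrative): an XOR with five sources indexes
 * slot_count_table[4] above, so *slots_per_op is 2 and a single operation
 * within the hardware byte-count limit consumes two descriptor slots.
 */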

static inline int
iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return iop_chan_memcpy_slot_count(0, slots_per_op);
	case AAU_ID:
		return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
	default:
		BUG();
	}
	return 0;
}

static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
					int *slots_per_op)
{
	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

	if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
		return slot_cnt;

	len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
	while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
		len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
		slot_cnt += *slots_per_op;
	}
	if (len)
		slot_cnt += *slots_per_op;

	return slot_cnt;
}
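
/*
 * Worked example (illustrative): a 40 MB, five-source XOR exceeds
 * IOP_ADMA_XOR_MAX_BYTE_COUNT (16 MB), so it is split into three operations
 * (16 MB + 16 MB + 8 MB).  With *slots_per_op == 2 the function returns
 * 2 + 2 + 2 = 6 slots.
 */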

/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
 * descriptors
 */
static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
					int *slots_per_op)
{
	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
		return slot_cnt;

	len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
	while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
		slot_cnt += *slots_per_op;
	}
	if (len)
		slot_cnt += *slots_per_op;

	return slot_cnt;
}

static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->dest_addr;
	case AAU_ID:
		return hw_desc.aau->dest_addr;
	default:
		BUG();
	}
	return 0;
}

static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->byte_count;
	case AAU_ID:
		return hw_desc.aau->byte_count;
	default:
		BUG();
	}
	return 0;
}

/* translate the src_idx to a descriptor word index */
static inline int __desc_idx(int src_idx)
{
	static const int desc_idx_table[] = { 0, 0, 0, 0,
					      0, 1, 2, 3,
					      5, 6, 7, 8,
					      9, 10, 11, 12,
					      14, 15, 16, 17,
					      18, 19, 20, 21,
					      23, 24, 25, 26,
					      27, 28, 29, 30,
					    };

	return desc_idx_table[src_idx];
}
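
/*
 * Illustrative note: the first four sources live in iop3xx_desc_aau.src[],
 * so entries 0-3 of the table above are unused.  From src_idx 4 onward the
 * table yields the src_edc[] word carrying that source address, e.g.
 * __desc_idx(4) == 0 places the fifth source in src_edc[0].
 */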

static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan,
					int src_idx)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->src_addr;
	case AAU_ID:
		break;
	default:
		BUG();
	}

	if (src_idx < 4)
		return hw_desc.aau->src[src_idx];
	else
		return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
}

static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
					int src_idx, dma_addr_t addr)
{
	if (src_idx < 4)
		hw_desc->src[src_idx] = addr;
	else
		hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
}

static inline void
iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
{
	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_dma_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	u_desc_ctrl.field.mem_to_mem_en = 1;
	u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
	hw_desc->upper_pci_src_addr = 0;
	hw_desc->crc_addr = 0;
}

static inline void
iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
	u_desc_ctrl.field.dest_write_en = 1;
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
}

static inline u32
iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
		     unsigned long flags)
{
	int i, shift;
	u32 edcr;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		edcr = 0;
		shift = 1;
		for (i = 24; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
		src_cnt = 24;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		edcr = 0;
		shift = 1;
		for (i = 16; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
		src_cnt = 16;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		edcr = 0;
		shift = 1;
		for (i = 8; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
		src_cnt = 8;
		/* fall through */
	case 2 ... 8:
		shift = 1;
		for (i = 0; i < src_cnt; i++) {
			u_desc_ctrl.value |= (1 << shift);
			shift += 3;
		}

		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 1;
	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;

	return u_desc_ctrl.value;
}
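
/*
 * Worked example (illustrative): for src_cnt == 10 the switch enters the
 * "9 ... 16" case, so blk_ctrl becomes 0x2 (use EDCR0) and the EDCR0 word is
 * set to 0x12, i.e. command 0x1 in the block-9 and block-10 control fields;
 * the fall through to "2 ... 8" then sets command 0x1 for blocks 1-8 in the
 * main control word, after which blk1_cmd_ctrl is overwritten with 0x7
 * (direct fill).
 */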

static inline void
iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
		  unsigned long flags)
{
	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
}

/* return the number of operations */
static inline int
iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
		       unsigned long flags)
{
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;
	int i, j;

	hw_desc = desc->hw_desc;

	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, j++) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
		u_desc_ctrl.field.dest_write_en = 0;
		u_desc_ctrl.field.zero_result_en = 1;
		u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
		iter->desc_ctrl = u_desc_ctrl.value;

		/* for the subsequent descriptors preserve the store queue
		 * and chain them together
		 */
		if (i) {
			prev_hw_desc =
				iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
			prev_hw_desc->next_desc =
				(u32) (desc->async_tx.phys + (i << 5));
		}
	}

	return j;
}

static inline void
iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
		       unsigned long flags)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 1 ... 8:
		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 0;
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
}

static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan,
					u32 byte_count)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		hw_desc.dma->byte_count = byte_count;
		break;
	case AAU_ID:
		hw_desc.aau->byte_count = byte_count;
		break;
	default:
		BUG();
	}
}

static inline void
iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
			struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		iop_desc_init_memcpy(desc, 1);
		hw_desc.dma->byte_count = 0;
		hw_desc.dma->dest_addr = 0;
		hw_desc.dma->src_addr = 0;
		break;
	case AAU_ID:
		iop_desc_init_null_xor(desc, 2, 1);
		hw_desc.aau->byte_count = 0;
		hw_desc.aau->dest_addr = 0;
		hw_desc.aau->src[0] = 0;
		hw_desc.aau->src[1] = 0;
		break;
	default:
		BUG();
	}
}

static inline void
iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
	int slots_per_op = desc->slots_per_op;
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int i = 0;

	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		hw_desc->byte_count = len;
	} else {
		do {
			iter = iop_hw_desc_slot_idx(hw_desc, i);
			iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
			len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
			i += slots_per_op;
		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);

		if (len) {
			iter = iop_hw_desc_slot_idx(hw_desc, i);
			iter->byte_count = len;
		}
	}
}
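
/*
 * Worked example (illustrative): len == 2500 exceeds the 1024-byte zero-sum
 * limit, so the first two descriptors in the chain each get a byte count of
 * 1024 and the final one gets the remaining 452 bytes.
 */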

static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan,
					dma_addr_t addr)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		hw_desc.dma->dest_addr = addr;
		break;
	case AAU_ID:
		hw_desc.aau->dest_addr = addr;
		break;
	default:
		BUG();
	}
}

static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
					dma_addr_t addr)
{
	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
	hw_desc->src_addr = addr;
}

static inline void
iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
				dma_addr_t addr)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	int i;

	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
	}
}

static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
					int src_idx, dma_addr_t addr)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	int i;

	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
	}
}

static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
					u32 next_desc_addr)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	iop_paranoia(hw_desc.dma->next_desc);
	hw_desc.dma->next_desc = next_desc_addr;
}

static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	return hw_desc.dma->next_desc;
}

static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	hw_desc.dma->next_desc = 0;
}

static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
					u32 val)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	hw_desc->src[0] = val;
}

static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;

	iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
	return desc_ctrl.zero_result_err;
}

static inline void iop_chan_append(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl;

	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
	dma_chan_ctrl |= 0x2;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
	return __raw_readl(DMA_CSR(chan));
}

static inline void iop_chan_disable(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
	dma_chan_ctrl &= ~1;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline void iop_chan_enable(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));

	dma_chan_ctrl |= 1;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}
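
/*
 * Usage sketch (illustrative, variable names are placeholders): a driver
 * starts a chain by pointing the channel at the first descriptor and setting
 * the enable bit; appending to a live chain sets the chain-resume bit instead:
 *
 *	iop_chan_set_next_descriptor(chan, first_desc_phys);
 *	iop_chan_enable(chan);		// CCR/ACR bit 0: channel enable
 *	...
 *	iop_chan_append(chan);		// CCR/ACR bit 1: resume chain
 */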

static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	status &= (1 << 9);
	__raw_writel(status, DMA_CSR(chan));
}

static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	status &= (1 << 8);
	__raw_writel(status, DMA_CSR(chan));
}

static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
		break;
	case AAU_ID:
		status &= (1 << 5);
		break;
	default:
		BUG();
	}

	__raw_writel(status, DMA_CSR(chan));
}

static inline int
iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	return test_bit(5, &status);
}

static inline int
iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(2, &status);
	default:
		return 0;
	}
}

static inline int
iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(3, &status);
	default:
		return 0;
	}
}

static inline int
iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(1, &status);
	default:
		return 0;
	}
}

#endif /* _ADMA_H */