/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#ifndef _ADMA_H
#define _ADMA_H
#include <linux/types.h>
#include <linux/io.h>
#include <asm/hardware.h>
#include <asm/hardware/iop_adma.h>
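
/*
 * Channel register accessors: each macro below resolves to the memory-mapped
 * address of a register, offset from the channel's mmr_base.
 */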
/* Memory copy units */
#define DMA_CCR(chan)		(chan->mmr_base + 0x0)
#define DMA_CSR(chan)		(chan->mmr_base + 0x4)
#define DMA_DAR(chan)		(chan->mmr_base + 0xc)
#define DMA_NDAR(chan)		(chan->mmr_base + 0x10)
#define DMA_PADR(chan)		(chan->mmr_base + 0x14)
#define DMA_PUADR(chan)		(chan->mmr_base + 0x18)
#define DMA_LADR(chan)		(chan->mmr_base + 0x1c)
#define DMA_BCR(chan)		(chan->mmr_base + 0x20)
#define DMA_DCR(chan)		(chan->mmr_base + 0x24)

/* Application accelerator unit */
#define AAU_ACR(chan)		(chan->mmr_base + 0x0)
#define AAU_ASR(chan)		(chan->mmr_base + 0x4)
#define AAU_ADAR(chan)		(chan->mmr_base + 0x8)
#define AAU_ANDAR(chan)		(chan->mmr_base + 0xc)
#define AAU_SAR(src, chan)	(chan->mmr_base + (0x10 + ((src) << 2)))
#define AAU_DAR(chan)		(chan->mmr_base + 0x20)
#define AAU_ABCR(chan)		(chan->mmr_base + 0x24)
#define AAU_ADCR(chan)		(chan->mmr_base + 0x28)
#define AAU_SAR_EDCR(src_edc)	(chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
#define AAU_EDCR0_IDX	8
#define AAU_EDCR1_IDX	17
#define AAU_EDCR2_IDX	26

#define DMA0_ID 0
#define DMA1_ID 1
#define AAU_ID 2
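
/*
 * In-memory layouts of the hardware descriptors and bitfield views of their
 * control words, as consumed by the DMA channels and the AAU.
 */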
struct iop3xx_aau_desc_ctrl {
	unsigned int int_en:1;
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int blk_ctrl:2;
	unsigned int dual_xor_en:1;
	unsigned int tx_complete:1;
	unsigned int zero_result_err:1;
	unsigned int zero_result_en:1;
	unsigned int dest_write_en:1;
};

struct iop3xx_aau_e_desc_ctrl {
	unsigned int reserved:1;
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int reserved2:7;
};

struct iop3xx_dma_desc_ctrl {
	unsigned int pci_transaction:4;
	unsigned int int_en:1;
	unsigned int dac_cycle_en:1;
	unsigned int mem_to_mem_en:1;
	unsigned int crc_data_tx_en:1;
	unsigned int crc_gen_en:1;
	unsigned int crc_seed_dis:1;
	unsigned int reserved:21;
	unsigned int crc_tx_complete:1;
};

struct iop3xx_desc_dma {
	u32 next_desc;
	union {
		u32 pci_src_addr;
		u32 pci_dest_addr;
		u32 src_addr;
	};
	union {
		u32 upper_pci_src_addr;
		u32 upper_pci_dest_addr;
	};
	union {
		u32 local_pci_src_addr;
		u32 local_pci_dest_addr;
		u32 dest_addr;
	};
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_dma_desc_ctrl desc_ctrl_field;
	};
	u32 crc_addr;
};

struct iop3xx_desc_aau {
	u32 next_desc;
	u32 src[4];
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;
		u32 e_desc_ctrl;
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
	} src_edc[31];
};

struct iop3xx_aau_gfmr {
	unsigned int gfmr1:8;
	unsigned int gfmr2:8;
	unsigned int gfmr3:8;
	unsigned int gfmr4:8;
};

struct iop3xx_desc_pq_xor {
	u32 next_desc;
	u32 src[3];
	union {
		u32 data_mult1;
		struct iop3xx_aau_gfmr data_mult1_field;
	};
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;
		u32 e_desc_ctrl;
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
		u32 data_multiplier;
		struct iop3xx_aau_gfmr data_mult_field;
		u32 reserved;
	} src_edc_gfmr[19];
};

struct iop3xx_desc_dual_xor {
	u32 next_desc;
	u32 src0_addr;
	u32 src1_addr;
	u32 h_src_addr;
	u32 d_src_addr;
	u32 h_dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	u32 d_dest_addr;
};
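
/*
 * Convenience union so the channel-generic helpers below can view a slot's
 * raw hw_desc pointer as whichever descriptor type the channel uses.
 */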
union iop3xx_desc {
	struct iop3xx_desc_aau *aau;
	struct iop3xx_desc_dma *dma;
	struct iop3xx_desc_pq_xor *pq_xor;
	struct iop3xx_desc_dual_xor *dual_xor;
	void *ptr;
};

static inline int iop_adma_get_max_xor(void)
{
	return 32;
}
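
/*
 * The helpers below dispatch on chan->device->id (DMA0_ID, DMA1_ID, AAU_ID)
 * because the DMA channels and the AAU expose different register maps and
 * descriptor formats.
 */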
static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{
	int id = chan->device->id;

	switch (id) {
	case DMA0_ID:
	case DMA1_ID:
		return __raw_readl(DMA_DAR(chan));
	case AAU_ID:
		return __raw_readl(AAU_ADAR(chan));
	default:
		BUG();
	}
	return 0;
}

static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
						u32 next_desc_addr)
{
	int id = chan->device->id;

	switch (id) {
	case DMA0_ID:
	case DMA1_ID:
		__raw_writel(next_desc_addr, DMA_NDAR(chan));
		break;
	case AAU_ID:
		__raw_writel(next_desc_addr, AAU_ANDAR(chan));
		break;
	}
}

#define IOP_ADMA_STATUS_BUSY (1 << 10)
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)

static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
}

static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
					int num_slots)
{
	/* num_slots will only ever be 1, 2, 4, or 8 */
	return (desc->idx & (num_slots - 1)) ? 0 : 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
{
	*slots_per_op = 1;
	return 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
{
	*slots_per_op = 1;
	return 1;
}
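
/*
 * XOR operations with more than four sources need the extended descriptor
 * area (src_edc[]), so they consume multiple contiguous descriptor slots
 * (32 bytes each, as implied by the "i << 5" stride used further down).
 * The table maps a source count (1-32) to slots per operation.
 */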
static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
					int *slots_per_op)
{
	static const int slot_count_table[] = { 0,
						1, 1, 1, 1, /* 01 - 04 */
						2, 2, 2, 2, /* 05 - 08 */
						4, 4, 4, 4, /* 09 - 12 */
						4, 4, 4, 4, /* 13 - 16 */
						8, 8, 8, 8, /* 17 - 20 */
						8, 8, 8, 8, /* 21 - 24 */
						8, 8, 8, 8, /* 25 - 28 */
						8, 8, 8, 8, /* 29 - 32 */
					      };
	*slots_per_op = slot_count_table[src_cnt];
	return *slots_per_op;
}

static inline int
iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return iop_chan_memcpy_slot_count(0, slots_per_op);
	case AAU_ID:
		return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
	default:
		BUG();
	}
	return 0;
}

static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
					int *slots_per_op)
{
	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

	if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
		return slot_cnt;

	len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
	while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
		len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
		slot_cnt += *slots_per_op;
	}

	if (len)
		slot_cnt += *slots_per_op;

	return slot_cnt;
}

/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
 * descriptors
 */
static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
					int *slots_per_op)
{
	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
		return slot_cnt;

	len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
	while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
		slot_cnt += *slots_per_op;
	}

	if (len)
		slot_cnt += *slots_per_op;

	return slot_cnt;
}

static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->dest_addr;
	case AAU_ID:
		return hw_desc.aau->dest_addr;
	default:
		BUG();
	}
	return 0;
}

static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->byte_count;
	case AAU_ID:
		return hw_desc.aau->byte_count;
	default:
		BUG();
	}
	return 0;
}
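
/*
 * Sources 0-3 live in iop3xx_desc_aau.src[]; sources 4 and up live in the
 * src_edc[] extension area, which is not densely packed, hence the lookup
 * table below.
 */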
/* translate the src_idx to a descriptor word index */
static inline int __desc_idx(int src_idx)
{
	static const int desc_idx_table[] = { 0, 0, 0, 0,
					      0, 1, 2, 3,
					      5, 6, 7, 8,
					      9, 10, 11, 12,
					      14, 15, 16, 17,
					      18, 19, 20, 21,
					      23, 24, 25, 26,
					      27, 28, 29, 30,
					      };

	return desc_idx_table[src_idx];
}

static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan,
					int src_idx)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->src_addr;
	case AAU_ID:
		break;
	default:
		BUG();
	}

	if (src_idx < 4)
		return hw_desc.aau->src[src_idx];
	else
		return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
}

static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
					int src_idx, dma_addr_t addr)
{
	if (src_idx < 4)
		hw_desc->src[src_idx] = addr;
	else
		hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
}

static inline void
iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
{
	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_dma_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	u_desc_ctrl.field.mem_to_mem_en = 1;
	u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
	u_desc_ctrl.field.int_en = int_en;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
	hw_desc->upper_pci_src_addr = 0;
	hw_desc->crc_addr = 0;
}

static inline void
iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
	u_desc_ctrl.field.dest_write_en = 1;
	u_desc_ctrl.field.int_en = int_en;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
}
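
/*
 * Build the control word for an XOR descriptor. The switch intentionally
 * falls through: the higher source-count cases program the extended
 * descriptor control words (EDCR2, then EDCR1, then EDCR0) to enable their
 * extra source blocks, and the final case enables blocks 1-8 in the main
 * control word.
 */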
static inline u32
iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
{
	int i, shift;
	u32 edcr;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		edcr = 0;
		shift = 1;
		for (i = 24; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
		src_cnt = 24;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		edcr = 0;
		shift = 1;
		for (i = 16; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
		src_cnt = 16;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		edcr = 0;
		shift = 1;
		for (i = 8; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
		src_cnt = 8;
		/* fall through */
	case 2 ... 8:
		shift = 1;
		for (i = 0; i < src_cnt; i++) {
			u_desc_ctrl.value |= (1 << shift);
			shift += 3;
		}

		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 1;
	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
	u_desc_ctrl.field.int_en = int_en;
	hw_desc->desc_ctrl = u_desc_ctrl.value;

	return u_desc_ctrl.value;
}

static inline void
iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
{
	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, int_en);
}
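
/*
 * Zero-sum (XOR parity check) reuses the XOR control word but clears
 * dest_write_en and sets zero_result_en, then chains one descriptor per
 * IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT segment of the operation.
 */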
/* return the number of operations */
static inline int
iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
{
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;
	int i, j;

	hw_desc = desc->hw_desc;

	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, j++) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, int_en);
		u_desc_ctrl.field.dest_write_en = 0;
		u_desc_ctrl.field.zero_result_en = 1;
		u_desc_ctrl.field.int_en = int_en;
		iter->desc_ctrl = u_desc_ctrl.value;

		/* for the subsequent descriptors preserve the store queue
		 * and chain them together
		 */
		if (i) {
			prev_hw_desc =
				iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
			prev_hw_desc->next_desc =
				(u32) (desc->async_tx.phys + (i << 5));
		}
	}

	return j;
}

static inline void
iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 1 ... 8:
		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 0;
	u_desc_ctrl.field.int_en = int_en;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
}

static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan,
					u32 byte_count)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		hw_desc.dma->byte_count = byte_count;
		break;
	case AAU_ID:
		hw_desc.aau->byte_count = byte_count;
		break;
	default:
		BUG();
	}
}

static inline void
iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
			struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		iop_desc_init_memcpy(desc, 1);
		hw_desc.dma->byte_count = 0;
		hw_desc.dma->dest_addr = 0;
		hw_desc.dma->src_addr = 0;
		break;
	case AAU_ID:
		iop_desc_init_null_xor(desc, 2, 1);
		hw_desc.aau->byte_count = 0;
		hw_desc.aau->dest_addr = 0;
		hw_desc.aau->src[0] = 0;
		hw_desc.aau->src[1] = 0;
		break;
	default:
		BUG();
	}
}

static inline void
iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
	int slots_per_op = desc->slots_per_op;
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int i = 0;

	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		hw_desc->byte_count = len;
	} else {
		do {
			iter = iop_hw_desc_slot_idx(hw_desc, i);
			iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
			len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
			i += slots_per_op;
		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);

		if (len) {
			iter = iop_hw_desc_slot_idx(hw_desc, i);
			iter->byte_count = len;
		}
	}
}

static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan,
					dma_addr_t addr)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		hw_desc.dma->dest_addr = addr;
		break;
	case AAU_ID:
		hw_desc.aau->dest_addr = addr;
		break;
	default:
		BUG();
	}
}

static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
					dma_addr_t addr)
{
	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
	hw_desc->src_addr = addr;
}

static inline void
iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
				dma_addr_t addr)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	int i;

	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
	}
}

static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
					int src_idx, dma_addr_t addr)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	int i;

	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
	}
}

static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
					u32 next_desc_addr)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	BUG_ON(hw_desc.dma->next_desc);
	hw_desc.dma->next_desc = next_desc_addr;
}

static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	return hw_desc.dma->next_desc;
}

static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	hw_desc.dma->next_desc = 0;
}

static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
					u32 val)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	hw_desc->src[0] = val;
}

static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;

	BUG_ON(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
	return desc_ctrl.zero_result_err;
}
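
/*
 * Kick the channel: setting bit 1 of the channel control register asks the
 * channel to resume/re-read its descriptor chain, while bit 0 (used by
 * iop_chan_enable/iop_chan_disable below) enables the channel itself.
 */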
static inline void iop_chan_append(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl;

	/* workaround dropped interrupts on 3xx */
	mod_timer(&chan->cleanup_watchdog, jiffies + msecs_to_jiffies(3));

	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
	dma_chan_ctrl |= 0x2;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
{
	if (!busy)
		del_timer(&chan->cleanup_watchdog);
}

static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
	return __raw_readl(DMA_CSR(chan));
}

static inline void iop_chan_disable(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
	dma_chan_ctrl &= ~1;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline void iop_chan_enable(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));

	dma_chan_ctrl |= 1;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}
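
/*
 * The status register bits appear to be cleared by writing them back: each
 * helper reads CSR, masks off just the bit(s) of interest (end of transfer,
 * end of chain, or the per-unit error bits), and writes that value back.
 */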
static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	status &= (1 << 9);
	__raw_writel(status, DMA_CSR(chan));
}

static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	status &= (1 << 8);
	__raw_writel(status, DMA_CSR(chan));
}

static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
		break;
	case AAU_ID:
		status &= (1 << 5);
		break;
	default:
		BUG();
	}

	__raw_writel(status, DMA_CSR(chan));
}

static inline int
iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	return test_bit(5, &status);
}

static inline int
iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(2, &status);
	default:
		return 0;
	}
}

static inline int
iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(3, &status);
	default:
		return 0;
	}
}

static inline int
iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(1, &status);
	default:
		return 0;
	}
}
#endif /* _ADMA_H */