/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#ifndef _ADMA_H
#define _ADMA_H

#include <linux/types.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/hardware/iop_adma.h>

/* Memory copy units */
#define DMA_CCR(chan) (chan->mmr_base + 0x0)
#define DMA_CSR(chan) (chan->mmr_base + 0x4)
#define DMA_DAR(chan) (chan->mmr_base + 0xc)
#define DMA_NDAR(chan) (chan->mmr_base + 0x10)
#define DMA_PADR(chan) (chan->mmr_base + 0x14)
#define DMA_PUADR(chan) (chan->mmr_base + 0x18)
#define DMA_LADR(chan) (chan->mmr_base + 0x1c)
#define DMA_BCR(chan) (chan->mmr_base + 0x20)
#define DMA_DCR(chan) (chan->mmr_base + 0x24)

/* Application accelerator unit */
#define AAU_ACR(chan) (chan->mmr_base + 0x0)
#define AAU_ASR(chan) (chan->mmr_base + 0x4)
#define AAU_ADAR(chan) (chan->mmr_base + 0x8)
#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
#define AAU_DAR(chan) (chan->mmr_base + 0x20)
#define AAU_ABCR(chan) (chan->mmr_base + 0x24)
#define AAU_ADCR(chan) (chan->mmr_base + 0x28)
#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
#define AAU_EDCR0_IDX 8
#define AAU_EDCR1_IDX 17
#define AAU_EDCR2_IDX 26

#define DMA0_ID 0
#define DMA1_ID 1
#define AAU_ID 2

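/* AAU hardware descriptor control word: a 3-bit command per source block,
 * plus block-control, interrupt, completion, zero-result, and destination
 * write-enable bits.
 */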
struct iop3xx_aau_desc_ctrl {
        unsigned int int_en:1;
        unsigned int blk1_cmd_ctrl:3;
        unsigned int blk2_cmd_ctrl:3;
        unsigned int blk3_cmd_ctrl:3;
        unsigned int blk4_cmd_ctrl:3;
        unsigned int blk5_cmd_ctrl:3;
        unsigned int blk6_cmd_ctrl:3;
        unsigned int blk7_cmd_ctrl:3;
        unsigned int blk8_cmd_ctrl:3;
        unsigned int blk_ctrl:2;
        unsigned int dual_xor_en:1;
        unsigned int tx_complete:1;
        unsigned int zero_result_err:1;
        unsigned int zero_result_en:1;
        unsigned int dest_write_en:1;
};

struct iop3xx_aau_e_desc_ctrl {
        unsigned int reserved:1;
        unsigned int blk1_cmd_ctrl:3;
        unsigned int blk2_cmd_ctrl:3;
        unsigned int blk3_cmd_ctrl:3;
        unsigned int blk4_cmd_ctrl:3;
        unsigned int blk5_cmd_ctrl:3;
        unsigned int blk6_cmd_ctrl:3;
        unsigned int blk7_cmd_ctrl:3;
        unsigned int blk8_cmd_ctrl:3;
        unsigned int reserved2:7;
};

struct iop3xx_dma_desc_ctrl {
        unsigned int pci_transaction:4;
        unsigned int int_en:1;
        unsigned int dac_cycle_en:1;
        unsigned int mem_to_mem_en:1;
        unsigned int crc_data_tx_en:1;
        unsigned int crc_gen_en:1;
        unsigned int crc_seed_dis:1;
        unsigned int reserved:21;
        unsigned int crc_tx_complete:1;
};

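/* In-memory hardware descriptor layouts.  The DMA descriptor drives the
 * memory copy units (PCI/local addresses plus optional CRC), while the AAU
 * descriptor carries up to 32 sources via the extended src_edc[] words.
 */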
struct iop3xx_desc_dma {
        u32 next_desc;
        union {
                u32 pci_src_addr;
                u32 pci_dest_addr;
                u32 src_addr;
        };
        union {
                u32 upper_pci_src_addr;
                u32 upper_pci_dest_addr;
        };
        union {
                u32 local_pci_src_addr;
                u32 local_pci_dest_addr;
                u32 dest_addr;
        };
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_dma_desc_ctrl desc_ctrl_field;
        };
        u32 crc_addr;
};

struct iop3xx_desc_aau {
        u32 next_desc;
        u32 src[4];
        u32 dest_addr;
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_aau_desc_ctrl desc_ctrl_field;
        };
        union {
                u32 src_addr;
                u32 e_desc_ctrl;
                struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
        } src_edc[31];
};

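/* GF(256) multiplier bytes and the P+Q / dual-xor descriptor layouts.  The
 * P+Q entry points further below are stubbed out: iop3xx does not support
 * these operations.
 */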
struct iop3xx_aau_gfmr {
        unsigned int gfmr1:8;
        unsigned int gfmr2:8;
        unsigned int gfmr3:8;
        unsigned int gfmr4:8;
};

struct iop3xx_desc_pq_xor {
        u32 next_desc;
        u32 src[3];
        union {
                u32 data_mult1;
                struct iop3xx_aau_gfmr data_mult1_field;
        };
        u32 dest_addr;
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_aau_desc_ctrl desc_ctrl_field;
        };
        union {
                u32 src_addr;
                u32 e_desc_ctrl;
                struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
                u32 data_multiplier;
                struct iop3xx_aau_gfmr data_mult_field;
                u32 reserved;
        } src_edc_gfmr[19];
};

struct iop3xx_desc_dual_xor {
        u32 next_desc;
        u32 src0_addr;
        u32 src1_addr;
        u32 h_src_addr;
        u32 d_src_addr;
        u32 h_dest_addr;
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_aau_desc_ctrl desc_ctrl_field;
        };
        u32 d_dest_addr;
};

union iop3xx_desc {
        struct iop3xx_desc_aau *aau;
        struct iop3xx_desc_dma *dma;
        struct iop3xx_desc_pq_xor *pq_xor;
        struct iop3xx_desc_dual_xor *dual_xor;
        void *ptr;
};

/* No support for p+q operations */
static inline int
iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
        BUG();
        return 0;
}

static inline void
iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
                 unsigned long flags)
{
        BUG();
}

static inline void
iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
{
        BUG();
}

static inline void
iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
                         dma_addr_t addr, unsigned char coef)
{
        BUG();
}

static inline int
iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
        BUG();
        return 0;
}

static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
                          unsigned long flags)
{
        BUG();
}

static inline void
iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
        BUG();
}

#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr

static inline void
iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
                              dma_addr_t *src)
{
        BUG();
}

static inline int iop_adma_get_max_xor(void)
{
        return 32;
}

static inline int iop_adma_get_max_pq(void)
{
        BUG();
        return 0;
}

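/* Channel register helpers: the DMA channels and the AAU expose equivalent
 * registers at different offsets, so dispatch on the device id.
 */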
static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{
        int id = chan->device->id;

        switch (id) {
        case DMA0_ID:
        case DMA1_ID:
                return __raw_readl(DMA_DAR(chan));
        case AAU_ID:
                return __raw_readl(AAU_ADAR(chan));
        default:
                BUG();
        }
        return 0;
}

static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
                                                u32 next_desc_addr)
{
        int id = chan->device->id;

        switch (id) {
        case DMA0_ID:
        case DMA1_ID:
                __raw_writel(next_desc_addr, DMA_NDAR(chan));
                break;
        case AAU_ID:
                __raw_writel(next_desc_addr, AAU_ANDAR(chan));
                break;
        }
}

#define IOP_ADMA_STATUS_BUSY (1 << 10)
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)

static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));
        return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
}

static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
                                      int num_slots)
{
        /* num_slots will only ever be 1, 2, 4, or 8 */
        return (desc->idx & (num_slots - 1)) ? 0 : 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
{
        *slots_per_op = 1;
        return 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
{
        *slots_per_op = 1;
        return 1;
}

static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
                                            int *slots_per_op)
{
        static const char slot_count_table[] = {
                1, 1, 1, 1, /* 01 - 04 */
                2, 2, 2, 2, /* 05 - 08 */
                4, 4, 4, 4, /* 09 - 12 */
                4, 4, 4, 4, /* 13 - 16 */
                8, 8, 8, 8, /* 17 - 20 */
                8, 8, 8, 8, /* 21 - 24 */
                8, 8, 8, 8, /* 25 - 28 */
                8, 8, 8, 8, /* 29 - 32 */
        };
        *slots_per_op = slot_count_table[src_cnt - 1];
        return *slots_per_op;
}

static inline int
iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return iop_chan_memcpy_slot_count(0, slots_per_op);
        case AAU_ID:
                return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
        default:
                BUG();
        }
        return 0;
}

static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
                                          int *slots_per_op)
{
        int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

        if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
                return slot_cnt;

        len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
        while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
                len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
                slot_cnt += *slots_per_op;
        }
        slot_cnt += *slots_per_op;

        return slot_cnt;
}

/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
 * descriptors
 */
static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
                                               int *slots_per_op)
{
        int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

        if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
                return slot_cnt;

        len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
        while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
                len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
                slot_cnt += *slots_per_op;
        }
        slot_cnt += *slots_per_op;

        return slot_cnt;
}

static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
{
        return 0;
}

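/* Hardware descriptor field accessors: union iop3xx_desc lets one helper
 * read either the DMA or the AAU descriptor format.
 */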
static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
                                         struct iop_adma_chan *chan)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return hw_desc.dma->dest_addr;
        case AAU_ID:
                return hw_desc.aau->dest_addr;
        default:
                BUG();
        }
        return 0;
}

static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
                                          struct iop_adma_chan *chan)
{
        BUG();
        return 0;
}

static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
                                          struct iop_adma_chan *chan)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return hw_desc.dma->byte_count;
        case AAU_ID:
                return hw_desc.aau->byte_count;
        default:
                BUG();
        }
        return 0;
}

/* translate the src_idx to a descriptor word index */
static inline int __desc_idx(int src_idx)
{
        static const int desc_idx_table[] = { 0, 0, 0, 0,
                                              0, 1, 2, 3,
                                              5, 6, 7, 8,
                                              9, 10, 11, 12,
                                              14, 15, 16, 17,
                                              18, 19, 20, 21,
                                              23, 24, 25, 26,
                                              27, 28, 29, 30,
                                            };

        return desc_idx_table[src_idx];
}

static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
                                        struct iop_adma_chan *chan,
                                        int src_idx)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return hw_desc.dma->src_addr;
        case AAU_ID:
                break;
        default:
                BUG();
        }

        if (src_idx < 4)
                return hw_desc.aau->src[src_idx];
        else
                return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
}

static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
                                                int src_idx, dma_addr_t addr)
{
        if (src_idx < 4)
                hw_desc->src[src_idx] = addr;
        else
                hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
}

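/* Descriptor initialization: program the control word for each supported
 * operation type.
 */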
static inline void
iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
{
        struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
        union {
                u32 value;
                struct iop3xx_dma_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        u_desc_ctrl.field.mem_to_mem_en = 1;
        u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;
        hw_desc->upper_pci_src_addr = 0;
        hw_desc->crc_addr = 0;
}

static inline void
iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
        u_desc_ctrl.field.dest_write_en = 1;
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;
}

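/* Build the XOR control word.  Sources beyond the first eight are enabled
 * through the extended descriptor control words (EDCR0-2) selected by
 * blk_ctrl; each source contributes one enable bit every three bit
 * positions.
 */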
static inline u32
iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
                     unsigned long flags)
{
        int i, shift;
        u32 edcr;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        switch (src_cnt) {
        case 25 ... 32:
                u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                edcr = 0;
                shift = 1;
                for (i = 24; i < src_cnt; i++) {
                        edcr |= (1 << shift);
                        shift += 3;
                }
                hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
                src_cnt = 24;
                /* fall through */
        case 17 ... 24:
                if (!u_desc_ctrl.field.blk_ctrl) {
                        hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
                        u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                }
                edcr = 0;
                shift = 1;
                for (i = 16; i < src_cnt; i++) {
                        edcr |= (1 << shift);
                        shift += 3;
                }
                hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
                src_cnt = 16;
                /* fall through */
        case 9 ... 16:
                if (!u_desc_ctrl.field.blk_ctrl)
                        u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
                edcr = 0;
                shift = 1;
                for (i = 8; i < src_cnt; i++) {
                        edcr |= (1 << shift);
                        shift += 3;
                }
                hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
                src_cnt = 8;
                /* fall through */
        case 2 ... 8:
                shift = 1;
                for (i = 0; i < src_cnt; i++) {
                        u_desc_ctrl.value |= (1 << shift);
                        shift += 3;
                }

                if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
                        u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
        }

        u_desc_ctrl.field.dest_write_en = 1;
        u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;

        return u_desc_ctrl.value;
}

static inline void
iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
                  unsigned long flags)
{
        iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
}

/* return the number of operations */
static inline int
iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
                       unsigned long flags)
{
        int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
        struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;
        int i, j;

        hw_desc = desc->hw_desc;

        for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
                i += slots_per_op, j++) {
                iter = iop_hw_desc_slot_idx(hw_desc, i);
                u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
                u_desc_ctrl.field.dest_write_en = 0;
                u_desc_ctrl.field.zero_result_en = 1;
                u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
                iter->desc_ctrl = u_desc_ctrl.value;

                /* for the subsequent descriptors preserve the store queue
                 * and chain them together
                 */
                if (i) {
                        prev_hw_desc =
                                iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
                        prev_hw_desc->next_desc =
                                (u32) (desc->async_tx.phys + (i << 5));
                }
        }

        return j;
}

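/* A null xor runs the AAU over the sources without writing a result
 * (dest_write_en = 0); iop_desc_init_interrupt() below uses it to build
 * interrupt-only descriptors.
 */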
static inline void
iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
                       unsigned long flags)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        switch (src_cnt) {
        case 25 ... 32:
                u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
                /* fall through */
        case 17 ... 24:
                if (!u_desc_ctrl.field.blk_ctrl) {
                        hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
                        u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                }
                hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
                /* fall through */
        case 9 ... 16:
                if (!u_desc_ctrl.field.blk_ctrl)
                        u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
                hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
                /* fall through */
        case 1 ... 8:
                if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
                        u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
        }

        u_desc_ctrl.field.dest_write_en = 0;
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;
}

static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
                                           struct iop_adma_chan *chan,
                                           u32 byte_count)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                hw_desc.dma->byte_count = byte_count;
                break;
        case AAU_ID:
                hw_desc.aau->byte_count = byte_count;
                break;
        default:
                BUG();
        }
}

static inline void
iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
                        struct iop_adma_chan *chan)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                iop_desc_init_memcpy(desc, 1);
                hw_desc.dma->byte_count = 0;
                hw_desc.dma->dest_addr = 0;
                hw_desc.dma->src_addr = 0;
                break;
        case AAU_ID:
                iop_desc_init_null_xor(desc, 2, 1);
                hw_desc.aau->byte_count = 0;
                hw_desc.aau->dest_addr = 0;
                hw_desc.aau->src[0] = 0;
                hw_desc.aau->src[1] = 0;
                break;
        default:
                BUG();
        }
}

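/* A zero-sum operation larger than IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT is
 * split across the chained descriptors set up by iop_desc_init_zero_sum(),
 * so the byte count is distributed over each descriptor in the chain.
 */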
static inline void
iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
        int slots_per_op = desc->slots_per_op;
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
        int i = 0;

        if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
                hw_desc->byte_count = len;
        } else {
                do {
                        iter = iop_hw_desc_slot_idx(hw_desc, i);
                        iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
                        len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
                        i += slots_per_op;
                } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);

                iter = iop_hw_desc_slot_idx(hw_desc, i);
                iter->byte_count = len;
        }
}

static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
                                          struct iop_adma_chan *chan,
                                          dma_addr_t addr)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                hw_desc.dma->dest_addr = addr;
                break;
        case AAU_ID:
                hw_desc.aau->dest_addr = addr;
                break;
        default:
                BUG();
        }
}

static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
                                                dma_addr_t addr)
{
        struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
        hw_desc->src_addr = addr;
}

static inline void
iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
                               dma_addr_t addr)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
        int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
        int i;

        for (i = 0; (slot_cnt -= slots_per_op) >= 0;
                i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
                iter = iop_hw_desc_slot_idx(hw_desc, i);
                iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
        }
}

static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
                                             int src_idx, dma_addr_t addr)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
        int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
        int i;

        for (i = 0; (slot_cnt -= slots_per_op) >= 0;
                i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
                iter = iop_hw_desc_slot_idx(hw_desc, i);
                iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
        }
}

static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
                                          u32 next_desc_addr)
{
        /* hw_desc->next_desc is the same location for all channels */
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        iop_paranoia(hw_desc.dma->next_desc);
        hw_desc.dma->next_desc = next_desc_addr;
}

static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
{
        /* hw_desc->next_desc is the same location for all channels */
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
        return hw_desc.dma->next_desc;
}

static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
{
        /* hw_desc->next_desc is the same location for all channels */
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
        hw_desc.dma->next_desc = 0;
}

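/* The AAU carries the block-fill value in src[0]; the zero-sum result is
 * latched in the completed descriptor's control word.
 */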
static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
                                               u32 val)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        hw_desc->src[0] = val;
}

static inline enum sum_check_flags
iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;

        iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
        return desc_ctrl.zero_result_err << SUM_CHECK_P;
}

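/* Channel control and status: the driver appends/resumes and enables or
 * disables a channel through the CCR, and acknowledges end-of-transfer,
 * end-of-chain, and error interrupts by writing the corresponding CSR bits
 * back.
 */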
static inline void iop_chan_append(struct iop_adma_chan *chan)
{
        u32 dma_chan_ctrl;

        dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
        dma_chan_ctrl |= 0x2;
        __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
        return __raw_readl(DMA_CSR(chan));
}

static inline void iop_chan_disable(struct iop_adma_chan *chan)
{
        u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
        dma_chan_ctrl &= ~1;
        __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline void iop_chan_enable(struct iop_adma_chan *chan)
{
        u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));

        dma_chan_ctrl |= 1;
        __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));
        status &= (1 << 9);
        __raw_writel(status, DMA_CSR(chan));
}

static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));
        status &= (1 << 8);
        __raw_writel(status, DMA_CSR(chan));
}

static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
                break;
        case AAU_ID:
                status &= (1 << 5);
                break;
        default:
                BUG();
        }

        __raw_writel(status, DMA_CSR(chan));
}

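/* Error decode helpers: the parity, MCU-abort, and internal target-abort
 * checks always return 0 on iop3xx; the remaining helpers test the relevant
 * CSR status bits.
 */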
static inline int
iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
{
        return 0;
}

static inline int
iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
{
        return 0;
}

static inline int
iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
{
        return 0;
}

static inline int
iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
{
        return test_bit(5, &status);
}

static inline int
iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return test_bit(2, &status);
        default:
                return 0;
        }
}

static inline int
iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return test_bit(3, &status);
        default:
                return 0;
        }
}

static inline int
iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return test_bit(1, &status);
        default:
                return 0;
        }
}
#endif /* _ADMA_H */