/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#ifndef _ADMA_H
#define _ADMA_H
#include <linux/types.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/hardware/iop_adma.h>

/* Memory copy units */
#define DMA_CCR(chan)		(chan->mmr_base + 0x0)
#define DMA_CSR(chan)		(chan->mmr_base + 0x4)
#define DMA_DAR(chan)		(chan->mmr_base + 0xc)
#define DMA_NDAR(chan)		(chan->mmr_base + 0x10)
#define DMA_PADR(chan)		(chan->mmr_base + 0x14)
#define DMA_PUADR(chan)		(chan->mmr_base + 0x18)
#define DMA_LADR(chan)		(chan->mmr_base + 0x1c)
#define DMA_BCR(chan)		(chan->mmr_base + 0x20)
#define DMA_DCR(chan)		(chan->mmr_base + 0x24)

/* Application accelerator unit */
#define AAU_ACR(chan)		(chan->mmr_base + 0x0)
#define AAU_ASR(chan)		(chan->mmr_base + 0x4)
#define AAU_ADAR(chan)		(chan->mmr_base + 0x8)
#define AAU_ANDAR(chan)		(chan->mmr_base + 0xc)
#define AAU_SAR(src, chan)	(chan->mmr_base + (0x10 + ((src) << 2)))
#define AAU_DAR(chan)		(chan->mmr_base + 0x20)
#define AAU_ABCR(chan)		(chan->mmr_base + 0x24)
#define AAU_ADCR(chan)		(chan->mmr_base + 0x28)
#define AAU_SAR_EDCR(src_edc)	(chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
#define AAU_EDCR0_IDX	8
#define AAU_EDCR1_IDX	17
#define AAU_EDCR2_IDX	26

#define DMA0_ID 0
#define DMA1_ID 1
#define AAU_ID 2

struct iop3xx_aau_desc_ctrl {
	unsigned int int_en:1;
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int blk_ctrl:2;
	unsigned int dual_xor_en:1;
	unsigned int tx_complete:1;
	unsigned int zero_result_err:1;
	unsigned int zero_result_en:1;
	unsigned int dest_write_en:1;
};

struct iop3xx_aau_e_desc_ctrl {
	unsigned int reserved:1;
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int reserved2:7;
};

struct iop3xx_dma_desc_ctrl {
	unsigned int pci_transaction:4;
	unsigned int int_en:1;
	unsigned int dac_cycle_en:1;
	unsigned int mem_to_mem_en:1;
	unsigned int crc_data_tx_en:1;
	unsigned int crc_gen_en:1;
	unsigned int crc_seed_dis:1;
	unsigned int reserved:21;
	unsigned int crc_tx_complete:1;
};

struct iop3xx_desc_dma {
	u32 next_desc;
	union {
		u32 pci_src_addr;
		u32 pci_dest_addr;
		u32 src_addr;
	};
	union {
		u32 upper_pci_src_addr;
		u32 upper_pci_dest_addr;
	};
	union {
		u32 local_pci_src_addr;
		u32 local_pci_dest_addr;
		u32 dest_addr;
	};
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_dma_desc_ctrl desc_ctrl_field;
	};
	u32 crc_addr;
};

struct iop3xx_desc_aau {
	u32 next_desc;
	u32 src[4];
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;
		u32 e_desc_ctrl;
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
	} src_edc[31];
};

struct iop3xx_aau_gfmr {
	unsigned int gfmr1:8;
	unsigned int gfmr2:8;
	unsigned int gfmr3:8;
	unsigned int gfmr4:8;
};

struct iop3xx_desc_pq_xor {
	u32 next_desc;
	u32 src[3];
	union {
		u32 data_mult1;
		struct iop3xx_aau_gfmr data_mult1_field;
	};
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;
		u32 e_desc_ctrl;
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
		u32 data_multiplier;
		struct iop3xx_aau_gfmr data_mult_field;
		u32 reserved;
	} src_edc_gfmr[19];
};

struct iop3xx_desc_dual_xor {
	u32 next_desc;
	u32 src0_addr;
	u32 src1_addr;
	u32 h_src_addr;
	u32 d_src_addr;
	u32 h_dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	u32 d_dest_addr;
};

union iop3xx_desc {
	struct iop3xx_desc_aau *aau;
	struct iop3xx_desc_dma *dma;
	struct iop3xx_desc_pq_xor *pq_xor;
	struct iop3xx_desc_dual_xor *dual_xor;
	void *ptr;
};

/* No support for p+q operations */
static inline int
iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
	BUG();
	return 0;
}

static inline void
iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
		 unsigned long flags)
{
	BUG();
}

static inline void
iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
{
	BUG();
}

static inline void
iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
			 dma_addr_t addr, unsigned char coef)
{
	BUG();
}

static inline int
iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
	BUG();
	return 0;
}

static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
			  unsigned long flags)
{
	BUG();
}

static inline void
iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
	BUG();
}

#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr

static inline void
iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
			      dma_addr_t *src)
{
	BUG();
}

static inline int iop_adma_get_max_xor(void)
{
	return 32;
}

static inline int iop_adma_get_max_pq(void)
{
	BUG();
	return 0;
}

static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{
	int id = chan->device->id;

	switch (id) {
	case DMA0_ID:
	case DMA1_ID:
		return __raw_readl(DMA_DAR(chan));
	case AAU_ID:
		return __raw_readl(AAU_ADAR(chan));
	default:
		BUG();
	}
	return 0;
}

static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
						u32 next_desc_addr)
{
	int id = chan->device->id;

	switch (id) {
	case DMA0_ID:
	case DMA1_ID:
		__raw_writel(next_desc_addr, DMA_NDAR(chan));
		break;
	case AAU_ID:
		__raw_writel(next_desc_addr, AAU_ANDAR(chan));
		break;
	}
}

#define IOP_ADMA_STATUS_BUSY (1 << 10)
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)

static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
}

static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
				      int num_slots)
{
	/* num_slots will only ever be 1, 2, 4, or 8 */
	return (desc->idx & (num_slots - 1)) ? 0 : 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
{
	*slots_per_op = 1;
	return 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
{
	*slots_per_op = 1;
	return 1;
}

static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
					    int *slots_per_op)
{
	static const char slot_count_table[] = {
		1, 1, 1, 1, /* 01 - 04 */
		2, 2, 2, 2, /* 05 - 08 */
		4, 4, 4, 4, /* 09 - 12 */
		4, 4, 4, 4, /* 13 - 16 */
		8, 8, 8, 8, /* 17 - 20 */
		8, 8, 8, 8, /* 21 - 24 */
		8, 8, 8, 8, /* 25 - 28 */
		8, 8, 8, 8, /* 29 - 32 */
	};
	*slots_per_op = slot_count_table[src_cnt - 1];
	return *slots_per_op;
}
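
/*
 * Reading of the table above (derived from slot_count_table and the
 * iop3xx_desc_aau layout, not quoted from the hardware manual): a XOR with
 * up to 4 sources fits in one descriptor slot, 5-8 sources take 2 contiguous
 * slots, 9-16 take 4, and 17-32 take 8, because the extra source addresses
 * and extended descriptor control words spill into the src_edc[] area.
 * For example, an 11-source XOR sets *slots_per_op = 4 and returns 4.
 */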

static inline int
iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return iop_chan_memcpy_slot_count(0, slots_per_op);
	case AAU_ID:
		return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
	default:
		BUG();
	}
	return 0;
}

static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
					  int *slots_per_op)
{
	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

	if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
		return slot_cnt;

	len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
	while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
		len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
		slot_cnt += *slots_per_op;
	}
	if (len)
		slot_cnt += *slots_per_op;

	return slot_cnt;
}
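
/*
 * Worked example of the loop above (a sketch of the arithmetic, nothing more):
 * a transfer longer than IOP_ADMA_XOR_MAX_BYTE_COUNT (16 MB) is split into
 * 16 MB pieces, each additional piece costing another *slots_per_op slots.
 * A 40 MB, 5-source XOR therefore gets slots_per_op = 2 and
 * slot_cnt = 2 + 2 + 2 = 6, i.e. three chained operations of 16, 16 and 8 MB.
 */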

/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
 * descriptors
 */
static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
					       int *slots_per_op)
{
	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
		return slot_cnt;

	len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
	while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
		slot_cnt += *slots_per_op;
	}
	if (len)
		slot_cnt += *slots_per_op;

	return slot_cnt;
}

static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
{
	return 0;
}

static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
					 struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->dest_addr;
	case AAU_ID:
		return hw_desc.aau->dest_addr;
	default:
		BUG();
	}
	return 0;
}

static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
					  struct iop_adma_chan *chan)
{
	BUG();
	return 0;
}

static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
					  struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->byte_count;
	case AAU_ID:
		return hw_desc.aau->byte_count;
	default:
		BUG();
	}
	return 0;
}

/* translate the src_idx to a descriptor word index */
static inline int __desc_idx(int src_idx)
{
	static const int desc_idx_table[] = { 0, 0, 0, 0,
					      0, 1, 2, 3,
					      5, 6, 7, 8,
					      9, 10, 11, 12,
					      14, 15, 16, 17,
					      18, 19, 20, 21,
					      23, 24, 25, 26,
					      27, 28, 29, 30,
					      };
	return desc_idx_table[src_idx];
}
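
/*
 * Note on the mapping above (an inference from the callers below and from the
 * src_edc[] union, not from the datasheet): source indices 0-3 live in
 * iop3xx_desc_aau.src[] and never reach this lookup, while higher indices are
 * placed into src_edc[]; the word positions the table skips are apparently the
 * entries that hold extended descriptor control words rather than source
 * addresses, so the mapping is not simply src_idx - 4.
 */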

static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan,
					int src_idx)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->src_addr;
	case AAU_ID:
		break;
	default:
		BUG();
	}

	if (src_idx < 4)
		return hw_desc.aau->src[src_idx];
	else
		return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
}

static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
						int src_idx, dma_addr_t addr)
{
	if (src_idx < 4)
		hw_desc->src[src_idx] = addr;
	else
		hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
}

static inline void
iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
{
	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_dma_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	u_desc_ctrl.field.mem_to_mem_en = 1;
	u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
	hw_desc->upper_pci_src_addr = 0;
	hw_desc->crc_addr = 0;
}

static inline void
iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
	u_desc_ctrl.field.dest_write_en = 1;
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
}

static inline u32
iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
		     unsigned long flags)
{
	int i, shift;
	u32 edcr;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		edcr = 0;
		shift = 1;
		for (i = 24; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
		src_cnt = 24;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		edcr = 0;
		shift = 1;
		for (i = 16; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
		src_cnt = 16;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		edcr = 0;
		shift = 1;
		for (i = 8; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
		src_cnt = 8;
		/* fall through */
	case 2 ... 8:
		shift = 1;
		for (i = 0; i < src_cnt; i++) {
			u_desc_ctrl.value |= (1 << shift);
			shift += 3;
		}

		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 1;
	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;

	return u_desc_ctrl.value;
}
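
/*
 * Bit-packing note (inferred from the loops and the bitfield layouts above,
 * not quoted from the hardware manual): desc_ctrl and each e_desc_ctrl carry
 * a 3-bit command field per source block starting at bit 1, so the
 * "shift = 1; ... shift += 3" loops write command value 0x1 into
 * blk1..blk8_cmd_ctrl for every active source; blocks beyond the first eight
 * take their commands from EDCR0-2, as selected by blk_ctrl.
 */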

static inline void
iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
		  unsigned long flags)
{
	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
}

/* return the number of operations */
static inline int
iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
		       unsigned long flags)
{
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;
	int i, j;

	hw_desc = desc->hw_desc;
	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
	     i += slots_per_op, j++) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
		u_desc_ctrl.field.dest_write_en = 0;
		u_desc_ctrl.field.zero_result_en = 1;
		u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
		iter->desc_ctrl = u_desc_ctrl.value;

		/* for the subsequent descriptors preserve the store queue
		 * and chain them together
		 */
		if (i) {
			prev_hw_desc =
				iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
			prev_hw_desc->next_desc =
				(u32) (desc->async_tx.phys + (i << 5));
		}
	}

	return j;
}
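
/*
 * Chaining note: the next_desc address is computed as
 * desc->async_tx.phys + (i << 5), which suggests each hardware descriptor
 * slot is 32 bytes; this is an inference from the shift, not a statement from
 * the datasheet. The loop emits one zero-sum XOR descriptor per slots_per_op
 * slots and links each descriptor to the physical address of the next group.
 */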

static inline void
iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
		       unsigned long flags)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 1 ... 8:
		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 0;
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
}

static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
					   struct iop_adma_chan *chan,
					   u32 byte_count)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		hw_desc.dma->byte_count = byte_count;
		break;
	case AAU_ID:
		hw_desc.aau->byte_count = byte_count;
		break;
	default:
		BUG();
	}
}

static inline void
iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
			struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		iop_desc_init_memcpy(desc, 1);
		hw_desc.dma->byte_count = 0;
		hw_desc.dma->dest_addr = 0;
		hw_desc.dma->src_addr = 0;
		break;
	case AAU_ID:
		iop_desc_init_null_xor(desc, 2, 1);
		hw_desc.aau->byte_count = 0;
		hw_desc.aau->dest_addr = 0;
		hw_desc.aau->src[0] = 0;
		hw_desc.aau->src[1] = 0;
		break;
	default:
		BUG();
	}
}

static inline void
iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
	int slots_per_op = desc->slots_per_op;
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int i = 0;

	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		hw_desc->byte_count = len;
	} else {
		do {
			iter = iop_hw_desc_slot_idx(hw_desc, i);
			iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
			len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
			i += slots_per_op;
		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);

		if (len) {
			iter = iop_hw_desc_slot_idx(hw_desc, i);
			iter->byte_count = len;
		}
	}
}

static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
					  struct iop_adma_chan *chan,
					  dma_addr_t addr)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		hw_desc.dma->dest_addr = addr;
		break;
	case AAU_ID:
		hw_desc.aau->dest_addr = addr;
		break;
	default:
		BUG();
	}
}

static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
						dma_addr_t addr)
{
	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
	hw_desc->src_addr = addr;
}

static inline void
iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
			       dma_addr_t addr)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	int i;

	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
	     i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
	}
}

static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
					     int src_idx, dma_addr_t addr)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	int i;

	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
	     i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
	}
}

static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
					  u32 next_desc_addr)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	iop_paranoia(hw_desc.dma->next_desc);
	hw_desc.dma->next_desc = next_desc_addr;
}

static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	return hw_desc.dma->next_desc;
}

static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	hw_desc.dma->next_desc = 0;
}

static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
					       u32 val)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	hw_desc->src[0] = val;
}

static inline enum sum_check_flags
iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;

	iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
	return desc_ctrl.zero_result_err << SUM_CHECK_P;
}
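
/*
 * Usage note (paraphrasing the logic above): iop_paranoia() is only a debug
 * assertion that the descriptor completed with zero-sum checking enabled; the
 * return value shifts the hardware's zero_result_err bit onto the generic
 * SUM_CHECK_P position, so a non-zero result reports that the XOR of the
 * sources was not zero, i.e. the parity check failed.
 */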

static inline void iop_chan_append(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl;

	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
	dma_chan_ctrl |= 0x2;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
	return __raw_readl(DMA_CSR(chan));
}

static inline void iop_chan_disable(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
	dma_chan_ctrl &= ~1;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline void iop_chan_enable(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));

	dma_chan_ctrl |= 1;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	status &= (1 << 9);
	__raw_writel(status, DMA_CSR(chan));
}

static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	status &= (1 << 8);
	__raw_writel(status, DMA_CSR(chan));
}

static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
		break;
	case AAU_ID:
		status &= (1 << 5);
		break;
	default:
		BUG();
	}

	__raw_writel(status, DMA_CSR(chan));
}

static inline int
iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	return test_bit(5, &status);
}

static inline int
iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(2, &status);
	default:
		return 0;
	}
}

static inline int
iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(3, &status);
	default:
		return 0;
	}
}

static inline int
iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(1, &status);
	default:
		return 0;
	}
}

#endif /* _ADMA_H */