  1. /* linux/arch/arm/plat-samsung/s3c-pl330.c
  2. *
  3. * Copyright (C) 2010 Samsung Electronics Co. Ltd.
  4. * Jaswinder Singh <jassi.brar@samsung.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/init.h>
  12. #include <linux/module.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/io.h>
  15. #include <linux/slab.h>
  16. #include <linux/platform_device.h>
  17. #include <asm/hardware/pl330.h>
  18. #include <plat/s3c-pl330-pdata.h>
/**
 * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
 * @busy_chan: Number of channels currently busy.
 * @peri: List of IDs of peripherals this DMAC can work with.
 * @node: To attach to the global list of DMACs.
 * @pi: PL330 configuration info for the DMAC.
 * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
 */
struct s3c_pl330_dmac {
	unsigned		busy_chan;
	enum dma_ch		*peri;
	struct list_head	node;
	struct pl330_info	*pi;
	struct kmem_cache	*kmcache;
};
/**
 * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
 * @token: Xfer ID provided by the client.
 * @node: To attach to the list of xfers on a channel.
 * @px: Xfer for PL330 core.
 * @chan: Owner channel of this xfer.
 */
struct s3c_pl330_xfer {
	void			*token;
	struct list_head	node;
	struct pl330_xfer	px;
	struct s3c_pl330_chan	*chan;
};
/**
 * struct s3c_pl330_chan - Logical channel to communicate with
 *	a Physical peripheral.
 * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
 *	NULL if the channel is available to be acquired.
 * @id: ID of the peripheral that this channel can communicate with.
 * @options: Options specified by the client.
 * @sdaddr: Address provided via s3c2410_dma_devconfig.
 * @node: To attach to the global list of channels.
 * @lrq: Pointer to the last submitted pl330_req to PL330 core.
 * @xfer_list: To manage list of xfers enqueued.
 * @req: Two requests to communicate with the PL330 engine.
 * @callback_fn: Callback function to the client.
 * @rqcfg: Channel configuration for the xfers.
 * @xfer_head: Pointer to the xfer to be next executed.
 * @dmac: Pointer to the DMAC that manages this channel, NULL if the
 *	channel is available to be acquired.
 * @client: Client of this channel. NULL if the
 *	channel is available to be acquired.
 */
struct s3c_pl330_chan {
	void				*pl330_chan_id;
	enum dma_ch			id;
	unsigned int			options;
	unsigned long			sdaddr;
	struct list_head		node;
	struct pl330_req		*lrq;
	struct list_head		xfer_list;
	struct pl330_req		req[2];
	s3c2410_dma_cbfn_t		callback_fn;
	struct pl330_reqcfg		rqcfg;
	struct s3c_pl330_xfer		*xfer_head;
	struct s3c_pl330_dmac		*dmac;
	struct s3c2410_dma_client	*client;
};
/* All DMACs in the platform */
static LIST_HEAD(dmac_list);

/* All channels to peripherals in the platform */
static LIST_HEAD(chan_list);

/*
 * Since we add resources (DMACs and Channels) to the global pool,
 * we need to guard access to the resources using a global lock.
 * Note: the lock is dropped around client callbacks; see _finish_off users.
 */
static DEFINE_SPINLOCK(res_lock);
  91. /* Returns the channel with ID 'id' in the chan_list */
  92. static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
  93. {
  94. struct s3c_pl330_chan *ch;
  95. list_for_each_entry(ch, &chan_list, node)
  96. if (ch->id == id)
  97. return ch;
  98. return NULL;
  99. }
  100. /* Allocate a new channel with ID 'id' and add to chan_list */
  101. static void chan_add(const enum dma_ch id)
  102. {
  103. struct s3c_pl330_chan *ch = id_to_chan(id);
  104. /* Return if the channel already exists */
  105. if (ch)
  106. return;
  107. ch = kmalloc(sizeof(*ch), GFP_KERNEL);
  108. /* Return silently to work with other channels */
  109. if (!ch)
  110. return;
  111. ch->id = id;
  112. ch->dmac = NULL;
  113. list_add_tail(&ch->node, &chan_list);
  114. }
  115. /* If the channel is not yet acquired by any client */
  116. static bool chan_free(struct s3c_pl330_chan *ch)
  117. {
  118. if (!ch)
  119. return false;
  120. /* Channel points to some DMAC only when it's acquired */
  121. return ch->dmac ? false : true;
  122. }
  123. /*
  124. * Returns 0 is peripheral i/f is invalid or not present on the dmac.
  125. * Index + 1, otherwise.
  126. */
  127. static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
  128. {
  129. enum dma_ch *id = dmac->peri;
  130. int i;
  131. /* Discount invalid markers */
  132. if (ch_id == DMACH_MAX)
  133. return 0;
  134. for (i = 0; i < PL330_MAX_PERI; i++)
  135. if (id[i] == ch_id)
  136. return i + 1;
  137. return 0;
  138. }
  139. /* If all channel threads of the DMAC are busy */
  140. static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
  141. {
  142. struct pl330_info *pi = dmac->pi;
  143. return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
  144. }
  145. /*
  146. * Returns the number of free channels that
  147. * can be handled by this dmac only.
  148. */
  149. static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
  150. {
  151. enum dma_ch *id = dmac->peri;
  152. struct s3c_pl330_dmac *d;
  153. struct s3c_pl330_chan *ch;
  154. unsigned found, count = 0;
  155. enum dma_ch p;
  156. int i;
  157. for (i = 0; i < PL330_MAX_PERI; i++) {
  158. p = id[i];
  159. ch = id_to_chan(p);
  160. if (p == DMACH_MAX || !chan_free(ch))
  161. continue;
  162. found = 0;
  163. list_for_each_entry(d, &dmac_list, node) {
  164. if (d != dmac && iface_of_dmac(d, ch->id)) {
  165. found = 1;
  166. break;
  167. }
  168. }
  169. if (!found)
  170. count++;
  171. }
  172. return count;
  173. }
/*
 * Measure of suitability of 'dmac' handling 'ch'
 *
 * 0 indicates 'dmac' can not handle 'ch' either
 * because it is not supported by the hardware or
 * because all dmac channels are currently busy.
 *
 * >0 value indicates 'dmac' has the capability.
 * The bigger the value the more suitable the dmac.
 */
#define MAX_SUIT	UINT_MAX
#define MIN_SUIT	0

static unsigned suitablility(struct s3c_pl330_dmac *dmac,
		struct s3c_pl330_chan *ch)
{
	struct pl330_info *pi = dmac->pi;
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;
	unsigned s;
	int i;

	s = MIN_SUIT;

	/* If all the DMAC channel threads are busy */
	if (dmac_busy(dmac))
		return s;

	/* Look for 'ch' among the peripherals this DMAC serves */
	for (i = 0; i < PL330_MAX_PERI; i++)
		if (id[i] == ch->id)
			break;

	/* If the 'dmac' can't talk to 'ch' */
	if (i == PL330_MAX_PERI)
		return s;

	/* Tentatively the best possible score: downgraded below if some
	 * other, less-loaded DMAC could also serve this peripheral */
	s = MAX_SUIT;
	list_for_each_entry(d, &dmac_list, node) {
		/*
		 * If some other dmac can talk to this
		 * peri and has some channel free.
		 */
		if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
			s = 0;
			break;
		}
	}

	/* No alternative exists — this DMAC is the unique choice */
	if (s)
		return s;

	/* Base score, adjusted by how much slack this DMAC has left */
	s = 100;

	/* Good if free chans are more, bad otherwise */
	s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);

	return s;
}
  222. /* More than one DMAC may have capability to transfer data with the
  223. * peripheral. This function assigns most suitable DMAC to manage the
  224. * channel and hence communicate with the peripheral.
  225. */
  226. static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
  227. {
  228. struct s3c_pl330_dmac *d, *dmac = NULL;
  229. unsigned sn, sl = MIN_SUIT;
  230. list_for_each_entry(d, &dmac_list, node) {
  231. sn = suitablility(d, ch);
  232. if (sn == MAX_SUIT)
  233. return d;
  234. if (sn > sl)
  235. dmac = d;
  236. }
  237. return dmac;
  238. }
  239. /* Acquire the channel for peripheral 'id' */
  240. static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
  241. {
  242. struct s3c_pl330_chan *ch = id_to_chan(id);
  243. struct s3c_pl330_dmac *dmac;
  244. /* If the channel doesn't exist or is already acquired */
  245. if (!ch || !chan_free(ch)) {
  246. ch = NULL;
  247. goto acq_exit;
  248. }
  249. dmac = map_chan_to_dmac(ch);
  250. /* If couldn't map */
  251. if (!dmac) {
  252. ch = NULL;
  253. goto acq_exit;
  254. }
  255. dmac->busy_chan++;
  256. ch->dmac = dmac;
  257. acq_exit:
  258. return ch;
  259. }
/* Delete xfer from its channel's queue, fixing up xfer_head so it
 * keeps pointing at a valid (or NULL) entry. */
static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
{
	struct s3c_pl330_xfer *t;
	struct s3c_pl330_chan *ch;
	int found;

	if (!xfer)
		return;

	ch = xfer->chan;

	/* Make sure xfer is in the queue */
	found = 0;
	list_for_each_entry(t, &ch->xfer_list, node)
		if (t == xfer) {
			found = 1;
			break;
		}

	if (!found)
		return;

	/* Pick the successor of 'xfer', treating the queue as circular:
	 * if xfer is the last entry, wrap around to the first one */
	if (xfer->node.next == &ch->xfer_list)
		t = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		t = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	/* If there was only one node left */
	if (t == xfer)
		ch->xfer_head = NULL;
	else if (ch->xfer_head == xfer)
		/* Head advances to the successor */
		ch->xfer_head = t;

	list_del(&xfer->node);
}
/* Provides pointer to the next xfer in the queue.
 * If CIRCULAR option is set, the list is left intact,
 * otherwise the xfer is removed from the list.
 * Forced delete 'pluck' can be set to override the CIRCULAR option.
 */
static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
		int pluck)
{
	struct s3c_pl330_xfer *xfer = ch->xfer_head;

	if (!xfer)
		return NULL;

	/* Advance xfer_head, wrapping to the first entry when 'xfer'
	 * is the last one (the queue is treated as circular) */
	if (xfer->node.next == &ch->xfer_list)
		ch->xfer_head = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		ch->xfer_head = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
		del_from_queue(xfer);

	return xfer;
}
/* Append 'xfer' to the channel queue; 'front' forces it to become the
 * head (used when re-queueing a request the DMAC could not accept). */
static inline void add_to_queue(struct s3c_pl330_chan *ch,
		struct s3c_pl330_xfer *xfer, int front)
{
	struct pl330_xfer *xt;

	/* If queue empty */
	if (ch->xfer_head == NULL)
		ch->xfer_head = xfer;

	xt = &ch->xfer_head->px;
	/* If the head already submitted (CIRCULAR head) */
	if (ch->options & S3C2410_DMAF_CIRCULAR &&
		(xt == ch->req[0].x || xt == ch->req[1].x))
		ch->xfer_head = xfer;

	/* If this is a resubmission, it should go at the head */
	if (front) {
		ch->xfer_head = xfer;
		list_add(&xfer->node, &ch->xfer_list);
	} else {
		list_add_tail(&xfer->node, &ch->xfer_list);
	}
}
  334. static inline void _finish_off(struct s3c_pl330_xfer *xfer,
  335. enum s3c2410_dma_buffresult res, int ffree)
  336. {
  337. struct s3c_pl330_chan *ch;
  338. if (!xfer)
  339. return;
  340. ch = xfer->chan;
  341. /* Do callback */
  342. if (ch->callback_fn)
  343. ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
  344. /* Force Free or if buffer is not needed anymore */
  345. if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
  346. kmem_cache_free(ch->dmac->kmcache, xfer);
  347. }
/* Pull the next xfer off the channel queue and hand it to the PL330
 * core via request slot 'r'. Returns 0 on success or nothing-to-do,
 * negative errno on a hard submission failure. */
static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
		struct pl330_req *r)
{
	struct s3c_pl330_xfer *xfer;
	int ret = 0;

	/* If already submitted */
	if (r->x)
		return 0;

	xfer = get_from_queue(ch, 0);
	if (xfer) {
		r->x = &xfer->px;

		/* Use max bandwidth for M<->M xfers */
		if (r->rqtype == MEMTOMEM) {
			struct pl330_info *pi = xfer->chan->dmac->pi;
			int burst = 1 << ch->rqcfg.brst_size;
			u32 bytes = r->x->bytes;
			int bl;

			/* Start from the largest burst length the data
			 * buffer can sustain at this burst size */
			bl = pi->pcfg.data_bus_width / 8;
			bl *= pi->pcfg.data_buf_dep;
			bl /= burst;

			/* src/dst_burst_len can't be more than 16 */
			if (bl > 16)
				bl = 16;

			/* Shrink until the xfer size divides evenly */
			while (bl > 1) {
				if (!(bytes % (bl * burst)))
					break;
				bl--;
			}

			ch->rqcfg.brst_len = bl;
		} else {
			ch->rqcfg.brst_len = 1;
		}

		ret = pl330_submit_req(ch->pl330_chan_id, r);

		/* If submission was successful */
		if (!ret) {
			ch->lrq = r; /* latest submitted req */
			return 0;
		}

		r->x = NULL;

		/* If both of the PL330 ping-pong buffers filled */
		if (ret == -EAGAIN) {
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			/* Queue back again */
			add_to_queue(ch, xfer, 1);
			ret = 0;
		} else {
			/* Hard failure: complete the xfer with an error */
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			_finish_off(xfer, S3C2410_RES_ERR, 0);
		}
	}

	return ret;
}
/* Common xfer-done handler invoked (via rq0/rq1 below) by the PL330
 * core for either request slot: refill the slot with the next queued
 * xfer, then report the finished one to the client. */
static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
	struct pl330_req *r, enum pl330_op_err err)
{
	unsigned long flags;
	struct s3c_pl330_xfer *xfer;
	struct pl330_xfer *xl = r->x;
	enum s3c2410_dma_buffresult res;

	spin_lock_irqsave(&res_lock, flags);

	/* Free the slot and keep the DMAC fed before doing callbacks */
	r->x = NULL;

	s3c_pl330_submit(ch, r);

	spin_unlock_irqrestore(&res_lock, flags);

	/* Map result to S3C DMA API */
	if (err == PL330_ERR_NONE)
		res = S3C2410_RES_OK;
	else if (err == PL330_ERR_ABORT)
		res = S3C2410_RES_ABORT;
	else
		res = S3C2410_RES_ERR;

	/* If last request had some xfer */
	if (xl) {
		xfer = container_of(xl, struct s3c_pl330_xfer, px);
		_finish_off(xfer, res, 0);
	} else {
		dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
			__func__, __LINE__);
	}
}
  429. static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
  430. {
  431. struct pl330_req *r = token;
  432. struct s3c_pl330_chan *ch = container_of(r,
  433. struct s3c_pl330_chan, req[0]);
  434. s3c_pl330_rq(ch, r, err);
  435. }
  436. static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
  437. {
  438. struct pl330_req *r = token;
  439. struct s3c_pl330_chan *ch = container_of(r,
  440. struct s3c_pl330_chan, req[1]);
  441. s3c_pl330_rq(ch, r, err);
  442. }
  443. /* Release an acquired channel */
  444. static void chan_release(struct s3c_pl330_chan *ch)
  445. {
  446. struct s3c_pl330_dmac *dmac;
  447. if (chan_free(ch))
  448. return;
  449. dmac = ch->dmac;
  450. ch->dmac = NULL;
  451. dmac->busy_chan--;
  452. }
/* Perform operation 'op' (start/stop/flush) on channel 'id'.
 * Returns 0 or a negative errno. */
int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
{
	struct s3c_pl330_xfer *xfer;
	enum pl330_chan_op pl330op;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int idx, ret;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto ctrl_exit;
	}

	switch (op) {
	case S3C2410_DMAOP_START:
		/* Make sure both reqs are enqueued */
		idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
		s3c_pl330_submit(ch, &ch->req[idx]);
		s3c_pl330_submit(ch, &ch->req[1 - idx]);
		pl330op = PL330_OP_START;
		break;

	case S3C2410_DMAOP_STOP:
		pl330op = PL330_OP_ABORT;
		break;

	case S3C2410_DMAOP_FLUSH:
		pl330op = PL330_OP_FLUSH;
		break;

	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_TIMEOUT:
	case S3C2410_DMAOP_STARTED:
		/* Unsupported ops are accepted as no-ops */
		spin_unlock_irqrestore(&res_lock, flags);
		return 0;

	default:
		spin_unlock_irqrestore(&res_lock, flags);
		return -EINVAL;
	}

	ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);

	if (pl330op == PL330_OP_START) {
		spin_unlock_irqrestore(&res_lock, flags);
		return ret;
	}

	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	/* Abort the current xfer */
	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		/* Drop xfer during FLUSH */
		if (pl330op == PL330_OP_FLUSH)
			del_from_queue(xfer);

		ch->req[idx].x = NULL;

		/* The lock is dropped around the client callback to
		 * avoid deadlock if it re-enters the DMA API */
		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT,
				pl330op == PL330_OP_FLUSH ? 1 : 0);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Flush the whole queue */
	if (pl330op == PL330_OP_FLUSH) {

		/* The other in-flight request slot, if occupied */
		if (ch->req[1 - idx].x) {
			xfer = container_of(ch->req[1 - idx].x,
					struct s3c_pl330_xfer, px);

			del_from_queue(xfer);

			ch->req[1 - idx].x = NULL;

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);
		}

		/* Finish off the remaining in the queue */
		xfer = ch->xfer_head;
		while (xfer) {

			del_from_queue(xfer);

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);

			/* Re-read the head; callbacks may have run */
			xfer = ch->xfer_head;
		}
	}

ctrl_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);
/* Queue a buffer of 'size' bytes at 'addr' for transfer on channel
 * 'id'. 'token' is handed back to the client's callback on completion.
 * Returns 0 or a negative errno. */
int s3c2410_dma_enqueue(enum dma_ch id, void *token,
		dma_addr_t addr, int size)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int idx, ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	/* Error if invalid or free channel */
	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	/* Error if size is unaligned to the burst size */
	if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	/* GFP_ATOMIC: we hold a spinlock with IRQs off here */
	xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
	if (!xfer) {
		ret = -ENOMEM;
		goto enq_exit;
	}

	xfer->token = token;
	xfer->chan = ch;
	xfer->px.bytes = size;
	xfer->px.next = NULL; /* Single request */

	/* For S3C DMA API, direction is always fixed for all xfers */
	if (ch->req[0].rqtype == MEMTODEV) {
		xfer->px.src_addr = addr;
		xfer->px.dst_addr = ch->sdaddr;
	} else {
		xfer->px.src_addr = ch->sdaddr;
		xfer->px.dst_addr = addr;
	}

	add_to_queue(ch, xfer, 0);

	/* Try submitting on either request slot, preferring the one
	 * that was not submitted last */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (!ch->req[idx].x)
		s3c_pl330_submit(ch, &ch->req[idx]);
	else
		s3c_pl330_submit(ch, &ch->req[1 - idx]);

	spin_unlock_irqrestore(&res_lock, flags);

	if (ch->options & S3C2410_DMAF_AUTOSTART)
		s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);

	return 0;

enq_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue);
/* Acquire channel 'id' for 'client' and initialize it with safe
 * defaults. Returns 0, or -EBUSY when no DMAC/thread is available. */
int s3c2410_dma_request(enum dma_ch id,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c_pl330_dmac *dmac;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = chan_acquire(id);
	if (!ch) {
		ret = -EBUSY;
		goto req_exit;
	}

	dmac = ch->dmac;

	/* Grab a hardware channel thread from the PL330 core */
	ch->pl330_chan_id = pl330_request_channel(dmac->pi);
	if (!ch->pl330_chan_id) {
		chan_release(ch);
		ret = -EBUSY;
		goto req_exit;
	}

	ch->client = client;
	ch->options = 0; /* Clear any option */
	ch->callback_fn = NULL; /* Clear any callback */
	ch->lrq = NULL;

	ch->rqcfg.brst_size = 2; /* Default word size */
	ch->rqcfg.swap = SWAP_NO;
	ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.privileged = 0;
	ch->rqcfg.insnaccess = 0;

	/* Set invalid direction; devconfig must be called before use */
	ch->req[0].rqtype = DEVTODEV;
	ch->req[1].rqtype = ch->req[0].rqtype;

	ch->req[0].cfg = &ch->rqcfg;
	ch->req[1].cfg = ch->req[0].cfg;

	ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
	ch->req[1].peri = ch->req[0].peri;

	ch->req[0].token = &ch->req[0];
	ch->req[0].xfer_cb = s3c_pl330_rq0;
	ch->req[1].token = &ch->req[1];
	ch->req[1].xfer_cb = s3c_pl330_rq1;

	ch->req[0].x = NULL;
	ch->req[1].x = NULL;

	/* Reset xfer list */
	INIT_LIST_HEAD(&ch->xfer_list);
	ch->xfer_head = NULL;

req_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_request);
/* Release channel 'id': abort in-flight requests, drain the queue
 * with ABORT callbacks and return the channel to the free pool.
 * Returns 0, or -EBUSY if 'client' does not own the channel. */
int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int ret = 0;
	unsigned idx;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	/* Freeing a nonexistent/unowned channel is a silent no-op */
	if (!ch || chan_free(ch))
		goto free_exit;

	/* Refuse if someone else wanted to free the channel */
	if (ch->client != client) {
		ret = -EBUSY;
		goto free_exit;
	}

	/* Stop any active xfer, flush the queue and do callbacks */
	pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);

	/* Abort the submitted requests */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[idx].x = NULL;
		del_from_queue(xfer);

		/* Lock dropped around the client callback */
		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	if (ch->req[1 - idx].x) {
		xfer = container_of(ch->req[1 - idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[1 - idx].x = NULL;
		del_from_queue(xfer);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Pluck and Abort the queued requests in order */
	do {
		xfer = get_from_queue(ch, 1);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	} while (xfer);

	ch->client = NULL;

	pl330_release_channel(ch->pl330_chan_id);

	ch->pl330_chan_id = NULL;

	chan_release(ch);

free_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_free);
  693. int s3c2410_dma_config(enum dma_ch id, int xferunit)
  694. {
  695. struct s3c_pl330_chan *ch;
  696. struct pl330_info *pi;
  697. unsigned long flags;
  698. int i, dbwidth, ret = 0;
  699. spin_lock_irqsave(&res_lock, flags);
  700. ch = id_to_chan(id);
  701. if (!ch || chan_free(ch)) {
  702. ret = -EINVAL;
  703. goto cfg_exit;
  704. }
  705. pi = ch->dmac->pi;
  706. dbwidth = pi->pcfg.data_bus_width / 8;
  707. /* Max size of xfer can be pcfg.data_bus_width */
  708. if (xferunit > dbwidth) {
  709. ret = -EINVAL;
  710. goto cfg_exit;
  711. }
  712. i = 0;
  713. while (xferunit != (1 << i))
  714. i++;
  715. /* If valid value */
  716. if (xferunit == (1 << i))
  717. ch->rqcfg.brst_size = i;
  718. else
  719. ret = -EINVAL;
  720. cfg_exit:
  721. spin_unlock_irqrestore(&res_lock, flags);
  722. return ret;
  723. }
  724. EXPORT_SYMBOL(s3c2410_dma_config);
  725. /* Options that are supported by this driver */
  726. #define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
  727. int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
  728. {
  729. struct s3c_pl330_chan *ch;
  730. unsigned long flags;
  731. int ret = 0;
  732. spin_lock_irqsave(&res_lock, flags);
  733. ch = id_to_chan(id);
  734. if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
  735. ret = -EINVAL;
  736. else
  737. ch->options = options;
  738. spin_unlock_irqrestore(&res_lock, flags);
  739. return 0;
  740. }
  741. EXPORT_SYMBOL(s3c2410_dma_setflags);
  742. int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
  743. {
  744. struct s3c_pl330_chan *ch;
  745. unsigned long flags;
  746. int ret = 0;
  747. spin_lock_irqsave(&res_lock, flags);
  748. ch = id_to_chan(id);
  749. if (!ch || chan_free(ch))
  750. ret = -EINVAL;
  751. else
  752. ch->callback_fn = rtn;
  753. spin_unlock_irqrestore(&res_lock, flags);
  754. return ret;
  755. }
  756. EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
  757. int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
  758. unsigned long address)
  759. {
  760. struct s3c_pl330_chan *ch;
  761. unsigned long flags;
  762. int ret = 0;
  763. spin_lock_irqsave(&res_lock, flags);
  764. ch = id_to_chan(id);
  765. if (!ch || chan_free(ch)) {
  766. ret = -EINVAL;
  767. goto devcfg_exit;
  768. }
  769. switch (source) {
  770. case S3C2410_DMASRC_HW: /* P->M */
  771. ch->req[0].rqtype = DEVTOMEM;
  772. ch->req[1].rqtype = DEVTOMEM;
  773. ch->rqcfg.src_inc = 0;
  774. ch->rqcfg.dst_inc = 1;
  775. break;
  776. case S3C2410_DMASRC_MEM: /* M->P */
  777. ch->req[0].rqtype = MEMTODEV;
  778. ch->req[1].rqtype = MEMTODEV;
  779. ch->rqcfg.src_inc = 1;
  780. ch->rqcfg.dst_inc = 0;
  781. break;
  782. default:
  783. ret = -EINVAL;
  784. goto devcfg_exit;
  785. }
  786. ch->sdaddr = address;
  787. devcfg_exit:
  788. spin_unlock_irqrestore(&res_lock, flags);
  789. return ret;
  790. }
  791. EXPORT_SYMBOL(s3c2410_dma_devconfig);
  792. int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
  793. {
  794. struct s3c_pl330_chan *ch = id_to_chan(id);
  795. struct pl330_chanstatus status;
  796. int ret;
  797. if (!ch || chan_free(ch))
  798. return -EINVAL;
  799. ret = pl330_chan_status(ch->pl330_chan_id, &status);
  800. if (ret < 0)
  801. return ret;
  802. *src = status.src_addr;
  803. *dst = status.dst_addr;
  804. return 0;
  805. }
  806. EXPORT_SYMBOL(s3c2410_dma_getposition);
  807. static irqreturn_t pl330_irq_handler(int irq, void *data)
  808. {
  809. if (pl330_update(data))
  810. return IRQ_HANDLED;
  811. else
  812. return IRQ_NONE;
  813. }
  814. static int pl330_probe(struct platform_device *pdev)
  815. {
  816. struct s3c_pl330_dmac *s3c_pl330_dmac;
  817. struct s3c_pl330_platdata *pl330pd;
  818. struct pl330_info *pl330_info;
  819. struct resource *res;
  820. int i, ret, irq;
  821. pl330pd = pdev->dev.platform_data;
  822. /* Can't do without the list of _32_ peripherals */
  823. if (!pl330pd || !pl330pd->peri) {
  824. dev_err(&pdev->dev, "platform data missing!\n");
  825. return -ENODEV;
  826. }
  827. pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
  828. if (!pl330_info)
  829. return -ENOMEM;
  830. pl330_info->pl330_data = NULL;
  831. pl330_info->dev = &pdev->dev;
  832. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  833. if (!res) {
  834. ret = -ENODEV;
  835. goto probe_err1;
  836. }
  837. request_mem_region(res->start, resource_size(res), pdev->name);
  838. pl330_info->base = ioremap(res->start, resource_size(res));
  839. if (!pl330_info->base) {
  840. ret = -ENXIO;
  841. goto probe_err2;
  842. }
  843. irq = platform_get_irq(pdev, 0);
  844. if (irq < 0) {
  845. ret = irq;
  846. goto probe_err3;
  847. }
  848. ret = request_irq(irq, pl330_irq_handler, 0,
  849. dev_name(&pdev->dev), pl330_info);
  850. if (ret)
  851. goto probe_err4;
  852. ret = pl330_add(pl330_info);
  853. if (ret)
  854. goto probe_err5;
  855. /* Allocate a new DMAC */
  856. s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
  857. if (!s3c_pl330_dmac) {
  858. ret = -ENOMEM;
  859. goto probe_err6;
  860. }
  861. /* Hook the info */
  862. s3c_pl330_dmac->pi = pl330_info;
  863. /* No busy channels */
  864. s3c_pl330_dmac->busy_chan = 0;
  865. s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
  866. sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
  867. if (!s3c_pl330_dmac->kmcache) {
  868. ret = -ENOMEM;
  869. goto probe_err7;
  870. }
  871. /* Get the list of peripherals */
  872. s3c_pl330_dmac->peri = pl330pd->peri;
  873. /* Attach to the list of DMACs */
  874. list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
  875. /* Create a channel for each peripheral in the DMAC
  876. * that is, if it doesn't already exist
  877. */
  878. for (i = 0; i < PL330_MAX_PERI; i++)
  879. if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
  880. chan_add(s3c_pl330_dmac->peri[i]);
  881. printk(KERN_INFO
  882. "Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
  883. printk(KERN_INFO
  884. "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
  885. pl330_info->pcfg.data_buf_dep,
  886. pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
  887. pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
  888. return 0;
  889. probe_err7:
  890. kfree(s3c_pl330_dmac);
  891. probe_err6:
  892. pl330_del(pl330_info);
  893. probe_err5:
  894. free_irq(irq, pl330_info);
  895. probe_err4:
  896. probe_err3:
  897. iounmap(pl330_info->base);
  898. probe_err2:
  899. release_mem_region(res->start, resource_size(res));
  900. probe_err1:
  901. kfree(pl330_info);
  902. return ret;
  903. }
  904. static int pl330_remove(struct platform_device *pdev)
  905. {
  906. struct s3c_pl330_dmac *dmac, *d;
  907. struct s3c_pl330_chan *ch;
  908. unsigned long flags;
  909. int del, found;
  910. if (!pdev->dev.platform_data)
  911. return -EINVAL;
  912. spin_lock_irqsave(&res_lock, flags);
  913. found = 0;
  914. list_for_each_entry(d, &dmac_list, node)
  915. if (d->pi->dev == &pdev->dev) {
  916. found = 1;
  917. break;
  918. }
  919. if (!found) {
  920. spin_unlock_irqrestore(&res_lock, flags);
  921. return 0;
  922. }
  923. dmac = d;
  924. /* Remove all Channels that are managed only by this DMAC */
  925. list_for_each_entry(ch, &chan_list, node) {
  926. /* Only channels that are handled by this DMAC */
  927. if (iface_of_dmac(dmac, ch->id))
  928. del = 1;
  929. else
  930. continue;
  931. /* Don't remove if some other DMAC has it too */
  932. list_for_each_entry(d, &dmac_list, node)
  933. if (d != dmac && iface_of_dmac(d, ch->id)) {
  934. del = 0;
  935. break;
  936. }
  937. if (del) {
  938. spin_unlock_irqrestore(&res_lock, flags);
  939. s3c2410_dma_free(ch->id, ch->client);
  940. spin_lock_irqsave(&res_lock, flags);
  941. list_del(&ch->node);
  942. kfree(ch);
  943. }
  944. }
  945. /* Remove the DMAC */
  946. list_del(&dmac->node);
  947. kfree(dmac);
  948. spin_unlock_irqrestore(&res_lock, flags);
  949. return 0;
  950. }
/* Platform driver glue; matches devices named "s3c-pl330". */
static struct platform_driver pl330_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "s3c-pl330",
	},
	.probe		= pl330_probe,
	.remove		= pl330_remove,
};
/* Module entry: register the platform driver. */
static int __init pl330_init(void)
{
	return platform_driver_register(&pl330_driver);
}
module_init(pl330_init);

/* Module exit: unregister the platform driver. */
static void __exit pl330_exit(void)
{
	platform_driver_unregister(&pl330_driver);
	return;
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
MODULE_LICENSE("GPL");