amba-pl08x.c

/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry. Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active. The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero). The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ. The DMAC
 *    will then move to the next LLI entry.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/amba/pl080.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DRIVER_NAME	"pl08xdmac"

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;
/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 *	that need to be checked for permission before use and some registers are
 *	missing
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
	bool nomadik;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl. Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)
/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @base: memory base address for this physical channel's registers
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 * world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
};
/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 *	mux released.
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	struct pl08x_lli *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd. Other registers are in llis_va[0].
	 */
	u32 ccfg;
	bool done;
};

/**
 * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};
/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @cfg: runtime slave configuration, including the RX/TX addresses
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	const struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned mux_use;
};
/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	u8 lli_buses;
	u8 mem_buses;
};
/*
 * PL08X specific defines
 */

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
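
/*
 * For reference: struct pl08x_lli above is four u32 words (16 bytes), so the
 * 0x2000-byte LLI buffer gives room for 8192 / 16 = 512 LLIs per descriptor.
 */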
#define PL08X_ALIGN		8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, vd.tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX. One important point to note
 * here is that this does not depend on the physical channel.
 */
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
		ret = pd->get_xfer_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}

static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
			pd->put_xfer_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}
/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed. Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_lli *lli;
	u32 val;

	list_del(&txd->vd.node);

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	lli = &txd->llis_va[0];

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}
/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status. This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}
static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
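
/*
 * For reference: the byte count above is the transfer-size field scaled by
 * the source width, e.g. a transfer size of 256 with a 32-bit source width
 * encodes 256 * 4 = 1024 bytes.
 */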
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	size_t bytes = 0;

	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	return bytes;
}
/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

/* Mark the physical channel as free. Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	ch->serving = NULL;
}

/*
 * Try to allocate a physical channel. When successful, assign it to
 * this virtual channel, and initiate the next descriptor. The
 * virtual channel lock must be held at this point.
 */
static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		plchan->state = PL08X_CHAN_WAITING;
		return;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
	struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	ch->serving = plchan;
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_dma_chan *p, *next;

 retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
		if (p->state == PL08X_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
			if (p->state == PL08X_CHAN_WAITING) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	pl08x_terminate_phy_chan(pl08x, plchan->phychan);

	if (next) {
		bool success;

		/*
		 * Eww. We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == PL08X_CHAN_WAITING;
		if (success)
			pl08x_phy_reassign_start(plchan->phychan, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		pl08x_put_phy_channel(pl08x, plchan->phychan);
	}

	plchan->phychan = NULL;
	plchan->state = PL08X_CHAN_IDLE;
}
/*
 * LLI handling
 */
static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer. The slave bus is the
 * victim if src & dst are not similarly aligned, i.e. if, after aligning
 * the master's address to the transfer width (by sending a few bytes one
 * by one), the slave is still not aligned, its width is reduced to BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}
/*
 * Fills in one LLI for a certain transfer descriptor and advance the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
		sizeof(struct pl08x_lli);
	llis_va[num_llis].lli |= bd->lli_bus;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
		u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
	(*total_bytes) += len;
}
/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
			      struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	struct pl08x_lli *llis_va;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
				       PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
				       PL080_CONTROL_DWIDTH_SHIFT);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev,
			"src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
			(u64)bd.srcbus.addr,
			cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			(u64)bd.dstbus.addr,
			cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the user driver, with
		 *   memory address and zero length. We pass this to the
		 *   controller and after the transfer it will receive the last
		 *   burst request from the peripheral and so the transfer
		 *   finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, as the DMA controller doesn't know when an LLI's
		 *   transfer gets over, it can't load the next LLI. So in this
		 *   case, there has to be an assumption that only one LLI is
		 *   supported. Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
				PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if (!IS_BUS_ALIGNED(&bd.srcbus) ||
				!IS_BUS_ALIGNED(&bd.dstbus)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, 0);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
			break;
		}
		/*
		 * Send byte by byte for following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if (!IS_BUS_ALIGNED(mbus)) {
			early_bytes = mbus->buswidth -
				(mbus->addr & (mbus->buswidth - 1));
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08x)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
				&total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (!IS_BUS_ALIGNED(sbus)) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
				PL080_CONTROL_TRANSFER_SIZE_MASK;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * Calculate actual transfer size in relation to
				 * bus width and get a maximum remainder of the
				 * highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, tsize);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
						lli_len, cctl);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(&bd, &cctl, bd.remainder,
						num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, (u32) MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI. */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt. */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		dev_vdbg(&pl08x->adev->dev,
			 "%-3s %-9s %-10s %-10s %-10s %s\n",
			 "lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
				);
		}
	}
#endif

	return num_llis;
}
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->vd.tx.chan->device->dev;
	struct pl08x_sg *dsg;

	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		else {
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		}
	}
	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
	}
}

static void pl08x_desc_free(struct virt_dma_desc *vd)
{
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);

	if (!plchan->slave)
		pl08x_unmap_buffers(txd);

	if (!txd->done)
		pl08x_release_mux(plchan);

	pl08x_free_txd(plchan->host, txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&plchan->vc, &head);
	vchan_dma_desc_free_list(&plchan->vc, &head);
}
/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		if (plchan->state == PL08X_CHAN_PAUSED)
			ret = DMA_PAUSED;
		return ret;
	}

	spin_lock_irqsave(&plchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		vd = vchan_find_desc(&plchan->vc, cookie);
		if (vd) {
			/* On the issued list, so hasn't been processed yet */
			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
			struct pl08x_sg *dsg;

			list_for_each_entry(dsg, &txd->dsg_list, node)
				bytes += dsg->len;
		} else {
			bytes = pl08x_getbytes_chan(plchan);
		}
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, bytes);

	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return ret;
}
/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};
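
/*
 * Note: pl08x_burst() below walks this table from the largest entry down and
 * picks the first burst size that does not exceed the requested maxburst,
 * so e.g. maxburst = 20 selects PL080_BSIZE_16 and maxburst = 1 falls
 * through to PL080_BSIZE_1.
 */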
/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port. We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}
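
/*
 * For example, with src limited to AHB1 and dst allowed on both masters,
 * pl08x_select_bus() above routes the source to AHB1 and the destination to
 * AHB2, keeping the two sides of the transfer on separate ports.
 */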
static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}

static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}

static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}

static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
	enum dma_slave_buswidth addr_width, u32 maxburst)
{
	u32 width, burst, cctl = 0;

	width = pl08x_width(addr_width);
	if (width == ~0)
		return ~0;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element. Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	return pl08x_cctl(cctl);
}

static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	plchan->cfg = *config;

	return 0;
}
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (vchan_issue_pending(&plchan->vc)) {
		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
			pl08x_phy_alloc_and_start(plchan);
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		INIT_LIST_HEAD(&txd->dsg_list);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
				__func__);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	struct scatterlist *sg;
	enum dma_slave_buswidth addr_width;
	dma_addr_t slave_addr;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	if (direction == DMA_MEM_TO_DEV) {
		cctl = PL080_CONTROL_SRC_INCR;
		slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		cctl = PL080_CONTROL_DST_INCR;
		slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);

	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	ret = pl08x_request_mux(plchan);
	if (ret < 0) {
		pl08x_free_txd(pl08x, txd);
		dev_dbg(&pl08x->adev->dev,
			"unable to mux for transfer on %s due to platform restrictions\n",
			plchan->name);
		return NULL;
	}

	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
		 plchan->signal, plchan->name);

	/* Assign the flow control signal to this channel */
	if (direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
		if (!dsg) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else {
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			pl08x_phy_free(plchan);
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_desc_free(&plchan->at->vd);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return ret;
}
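
/*
 * Filter callback for dma_request_channel(). As a rough sketch of how a
 * client typically uses it (the "uart0_tx" bus_id below is purely an
 * illustrative placeholder, not taken from this driver):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 *
 * The string passed as the filter parameter is compared against the
 * channel name derived from the platform data bus_id.
 */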
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Match on the channel name the client asked for */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}

/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually in use; if none are in use, shut it off to save some power and
 * cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
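
/*
 * Interrupt handler: acknowledge any error and terminal-count interrupts,
 * then for every physical channel that signalled, complete the descriptor
 * in flight and either start the next queued descriptor or release the
 * physical channel.
 */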
static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if (((1 << i) & err) || ((1 << i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;
			struct pl08x_txd *tx;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			spin_lock(&plchan->vc.lock);
			tx = plchan->at;
			if (tx) {
				plchan->at = NULL;
				/*
				 * This descriptor is done, release its mux
				 * reservation.
				 */
				pl08x_release_mux(plchan);
				tx->done = true;
				vchan_cookie_complete(&tx->vd);

				/*
				 * And start the next descriptor (if any),
				 * otherwise free this channel.
				 */
				if (vchan_next_desc(&plchan->vc))
					pl08x_start_next_txd(plchan);
				else
					pl08x_phy_free(plchan);
			}
			spin_unlock(&plchan->vc.lock);

			mask |= (1 << i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}
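
/*
 * Initialize a virtual slave channel from its platform channel data:
 * the channel is named after the bus_id and its default source and
 * destination addresses both start out as the peripheral address.
 */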
static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels:
	 * we won't always be able to use all of them, but the code will
	 * have to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;
		chan->signal = -1;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		dev_dbg(&pl08x->adev->dev,
			"initialize virtual channel \"%s\"\n",
			chan->name);

		chan->vc.desc_free = pl08x_desc_free;
		vchan_init(&chan->vc, dmadev);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}
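
/* Free the virtual channels created by pl08x_dma_init_virtual_channels() */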
static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		kfree(chan);
	}
}
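
/*
 * Debugfs support: expose a single file, named after the device, listing
 * each physical channel with its current virtual-channel user, followed by
 * the state of every virtual memcpy and slave channel. Roughly, the output
 * looks like (channel names below are illustrative only):
 *
 *	PL08x physical channels:
 *	CHANNEL:	USER:
 *	--------	-----
 *	0		memcpy0
 *	1		(none)
 */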
#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s%s\n",
			   ch->id,
			   virt_chan ? virt_chan->name : "(none)",
			   ch->locked ? " LOCKED" : "");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}

static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all channels */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
			S_IFREG | S_IRUGO, NULL, pl08x,
			&pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif
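
/*
 * Probe: map the device, hook up the interrupt, discover the physical
 * channels and register one memcpy and one slave dma_device with the
 * dmaengine core. The slave channel descriptors come from platform data;
 * as a rough, incomplete sketch of what a board might supply (struct and
 * field names follow the pl08x platform header, all values are purely
 * illustrative):
 *
 *	static struct pl08x_channel_data board_slave_channels[] = {
 *		{
 *			.bus_id = "uart0_tx",
 *			.periph_buses = PL08X_AHB2,
 *		},
 *	};
 *
 *	static struct pl08x_platform_data board_pl08x_pd = {
 *		.slave_channels = board_slave_channels,
 *		.num_slave_channels = ARRAY_SIZE(board_slave_channels),
 *		.lli_buses = PL08X_AHB1,
 *		.mem_buses = PL08X_AHB1,
 *	};
 */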
static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		ret = -EINVAL;
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only. If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs, align on 1-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Attach the interrupt handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate physical channel holders\n",
			__func__);
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only. Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->base + PL080_CH_CONFIG);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
			pl08x->pd->num_slave_channels, true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register memcpy as an async device - %d\n",
			 __func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register slave as an async device - %d\n",
			 __func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);
	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}

/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_nomadik = {
	.channels = 8,
	.dualmaster = true,
	.nomadik = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280080,
		.mask	= 0x00ffffff,
		.data	= &vendor_nomadik,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl08x_ids);

static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       " failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);