ste_dma40.c

/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)	(2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan)	(0x3 << D40_CHAN_POS(chan))
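/*
 * Illustrative arithmetic (not from the original source): each physical
 * channel owns a 2-bit command/status field, and an even/odd channel pair
 * shares the same bit position in its respective register (even channels
 * use ACTIVE, odd ones ACTIVO; see d40_channel_execute_command() below).
 * For channel 5, D40_CHAN_POS(5) = 2 * (5 / 2) = 4, so its field occupies
 * bits 5:4 and D40_CHAN_POS_MASK(5) = 0x3 << 4 = 0x30.
 */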
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/* Hardware designer of the block */
#define D40_HW_DESIGNER 0x8
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @dma_addr: DMA address, if mapped.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void		*base;
	int		 size;
	dma_addr_t	 dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8		 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 cyclic;
};
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		 *base;
	dma_addr_t	  dma_addr;
	void		 *base_unaligned;
	int		  pages;
	spinlock_t	  lock;
	struct d40_desc	**alloc_map;
};
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};
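/*
 * Illustrative encoding (not from the original source): allocated_src and
 * allocated_dst form a small state machine. D40_ALLOC_FREE means the half
 * channel is unused, D40_ALLOC_PHY means it is claimed as a whole by a
 * physical channel client, and otherwise each set bit below bit 30 marks
 * one logical event line in use, with D40_ALLOC_LOG_FREE (0) meaning
 * "logical mode, no event lines currently allocated". E.g. a src half
 * channel with logical event lines 3 and 7 in use would hold
 * (1 << 3) | (1 << 7) = 0x88.
 */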
struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending().
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 pending_queue;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			 runtime_addr;
	enum dma_data_direction		 runtime_direction;
};
/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}
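/*
 * Illustrative layout note (not from the original source): the per-channel
 * register block sits at a fixed stride from the controller base, so for
 * physical channel n the block starts at
 * virtbase + D40_DREG_PCBASE + n * D40_DREG_PCDELTA, and the
 * D40_CHAN_REG_* offsets used below index registers within that block.
 */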
#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...) \
	d40_err(chan2dev(d40c), format, ## arg)

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;
	int p;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

	/*
	 * Allocate both src and dst at the same time; therefore the halves
	 * start at 1, since 0 can't be used: zero is the end-of-link marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
		    D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
			    D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d40c, d);
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		curr_lcla = d40_lcla_alloc_one(chan, desc);
		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need
	 * to link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		dma_sync_single_range_for_device(chan->base->dev,
						 pool->dma_addr, lcla_offset,
						 2 * sizeof(struct d40_log_lli),
						 DMA_TO_DEVICE);

		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}
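/*
 * Illustrative arithmetic (not from the original source): per the offset
 * formula above, each physical channel owns a 1 KiB slice of the LCLA area
 * and every link slot holds a src/dst pair of struct d40_log_lli entries
 * (8 bytes each, 16 bytes per pair). Slot 5 of physical channel 2 thus
 * starts at byte offset 2 * 1024 + 8 * 5 * 2 = 2128, and 1024 / 16 = 64
 * slots per channel matches the D40_LCLA_LINK_PER_EVENT_GRP / 2 bound used
 * by the LCLA allocator.
 */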
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}
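/*
 * Illustrative arithmetic (not from the original source): apart from the
 * special "_1" encodings, which mean a single element per burst, the psize
 * field acts as a shift count, so an encoding of 2 yields bursts of
 * 2 << 2 = 8 elements and an encoding of 3 yields 16.
 */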
/*
 * The dma only supports transmitting packages up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}
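/*
 * Worked example (illustrative, assuming STEDMA40_MAX_SEG_SIZE is the
 * 16-bit element-count limit 0xffff): for data widths of one byte
 * (min_w = 0) and one word (max_w = 2), seg_max = ALIGN(0xffff, 4) =
 * 0x10000, which exceeds the limit, so it is trimmed by one word to
 * 0xfffc bytes. A 1 MiB (0x100000 byte) buffer then needs
 * 0x100000 / 0xfffc = 16 full segments plus a remainder, so dmalen = 17.
 */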
static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}

/* Support functions for logical channels */

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to suspend the chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
				   u32 event, int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;

	if (!enable) {
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		return;
	}

	/*
	 * The hardware sometimes doesn't register the enable when src and dst
	 * event lines are active on the same logical channel. Retry to ensure
	 * it does. Usually only one retry is sufficient.
	 */
	tries = 100;
	while (--tries) {
		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		if (readl(addr) & D40_EVENTLINE_MASK(event))
			break;
	}

	if (tries != 99)
		dev_dbg(chan2dev(d40c),
			"[%s] workaround enable S%cLNK (%d tries)\n",
			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
			100 - tries);

	WARN_ON(!tries);
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	unsigned long flags;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SSLNK);
	}

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
	}
}
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
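/*
 * Illustrative arithmetic (not from the original source): the residue is
 * the remaining element count scaled by the destination element size,
 * which the data_width field encodes as a shift. E.g. 512 outstanding
 * elements at a 32-bit width (data_width = 2) report
 * 512 * (1 << 2) = 2048 bytes left.
 */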
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}

static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (chan_is_logical(d40c)) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (chan_is_logical(d40c)) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
		if (chan_is_logical(d40c))
			d40_config_set_event(d40c, true);

		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_terminate_all(struct d40_chan *chan)
{
	unsigned long flags;
	int ret = 0;

	ret = d40_pause(chan);
	if (!ret && chan_is_physical(chan))
		ret = d40_channel_execute_command(chan, D40_DMA_STOP);

	spin_lock_irqsave(&chan->lock, flags);
	d40_term_all(chan);
	spin_unlock_irqrestore(&chan->lock, flags);

	return ret;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->chan.cookie++;

	if (d40c->chan.cookie < 0)
		d40c->chan.cookie = 1;

	d40d->txd.cookie = d40c->chan.cookie;

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (chan_is_logical(d40c)) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (chan_is_logical(d40c))
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}
/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload it,
		 * but only when the list is completed. We need to check for
		 * done because the interrupt will hit for every link, and not
		 * just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL)
			d40c->busy = false;
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);
	if (d40d == NULL)
		goto err;

	if (!d40d->cyclic)
		d40c->completed = d40d->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the
	 * client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_pool_lli_free(d40c, d40d);
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else {
			if (!d40d->is_in_client_list) {
				d40_desc_remove(d40d);
				d40_lcla_free_all(d40c, d40d);
				list_add_tail(&d40d->node, &d40c->client);
				d40d->is_in_client_list = true;
			}
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
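/*
 * Illustrative walk-through (not from the original source): regs[] is
 * scanned as one contiguous bitmap, so on a 32-bit build (BITS_PER_LONG
 * == 32) bit number 70 decodes as row = 70 / 32 = 2 and idx = 70 % 32 = 6,
 * i.e. a transfer-complete interrupt from the D40_DREG_LCTIS2 bank, which
 * maps to logical channel il[2].offset + idx = 64 + 6 = 70.
 */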
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid TX channel address (%d)\n",
			 conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid RX channel address (%d)\n",
			 conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		chan_err(d40c, "Invalid dst\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		chan_err(d40c, "Invalid src\n");
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		chan_err(d40c, "No event line\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		chan_err(d40c, "Invalid event group\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}
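/*
 * Illustrative sequence (not from the original source): on a free half
 * channel, d40_alloc_mask_set(phy, true, 3, true) first demotes
 * allocated_src from D40_ALLOC_FREE to D40_ALLOC_LOG_FREE and then sets
 * bit 3, so a second call for the same event line finds the bit already
 * set and returns false, while a physical claim (is_log == false) only
 * succeeds when both halves are still D40_ALLOC_FREE.
 */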
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);
	return is_free;
}
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical
		 * channels rather than packing every logical channel onto the
		 * first available ones.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];
	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}
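
/*
 * Tear down a channel: terminate its jobs, deactivate the event line
 * (logical mode) and release the allocation masks. The physical channel
 * is only stopped once its last logical user is gone; until then the
 * remaining logical channels are resumed and the function returns early.
 */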

static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d40c, d);
			d40_desc_remove(d);
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		chan_err(d40c, "suspend failed\n");
		return res;
	}

	if (chan_is_logical(d40c)) {
		/* Release logical channel, deactivate the event line */
		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					chan_err(d40c,
						 "Executing RUN command\n");
					return res;
				}
			}
			return 0;
		}
	} else {
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "Failed to stop channel\n");
		return res;
	}
	d40c->phy_chan = NULL;
	d40c->configured = false;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}
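
/*
 * A physical channel is considered paused when its field in the
 * ACTIVE/ACTIVO status register reads SUSPENDED or STOP; a logical
 * channel when its event line in the SSLNK/SDLNK link register is not
 * in the RUN state.
 */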

static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		 D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}
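
/*
 * The two helpers below convert scatterlists into LLIs (linked list
 * items), one flavour for logical channels (d40_prep_sg_log) and one
 * for physical channels (d40_prep_sg_phy). Note that only the physical
 * variant syncs the LLI pool to the device, since in that mode the
 * DMAC fetches the descriptors from memory itself.
 */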

static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);
	/* Don't let the dst conversion clobber an error from the src one */
	if (ret < 0)
		return ret;

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}

static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);
	/* Don't let the dst conversion clobber an error from the src one */
	if (ret < 0)
		return ret;

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}

static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}

static dma_addr_t
d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
{
	struct stedma40_platform_data *plat = chan->base->plat_data;
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	dma_addr_t addr = 0;

	if (chan->runtime_addr)
		return chan->runtime_addr;

	if (direction == DMA_FROM_DEVICE)
		addr = plat->dev_rx[cfg->src_dev_type];
	else if (direction == DMA_TO_DEVICE)
		addr = plat->dev_tx[cfg->dst_dev_type];

	return addr;
}
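
/*
 * Common preparation path for memcpy, slave and cyclic transfers. A
 * scatterlist whose last entry chains back to the first (as built by
 * dma40_prep_dma_cyclic() further down) marks the descriptor as cyclic.
 */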

static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_data_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr = 0;
	dma_addr_t dst_dev_addr = 0;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto err;

	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
		desc->cyclic = true;

	if (direction != DMA_NONE) {
		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);

		if (direction == DMA_FROM_DEVICE)
			src_dev_addr = dev_addr;
		else if (direction == DMA_TO_DEVICE)
			dst_dev_addr = dev_addr;
	}

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto err;
	}

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;

err:
	if (desc)
		d40_desc_free(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}

bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
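
/*
 * Priority (PSEG/PCEG) and realtime (RSEG/RCEG) properties are
 * programmed per event line through set/clear register pairs, one
 * register per event group; destination event lines live in the upper
 * halfword, as noted in the function below.
 */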

static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
	u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = 1 << event;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}

static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);

	if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
}

/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			chan_err(d40c, "Failed to configure memcpy channel\n");
			goto fail;
		}
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		chan_err(d40c, "Failed to allocate channel\n");
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, chan_is_logical(d40c));

	d40_set_prio_realtime(d40c);

	if (chan_is_logical(d40c)) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type *
				D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}

static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = size;
	sg_dma_len(&src_sg) = size;

	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}

static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
		   struct scatterlist *dst_sg, unsigned int dst_nents,
		   struct scatterlist *src_sg, unsigned int src_nents,
		   unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}

static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_data_direction direction,
							 unsigned long dma_flags)
{
	if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
		return NULL;

	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}
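
/*
 * Cyclic transfers are built by faking a circular scatterlist: one
 * entry per period plus a terminating entry whose page_link chains
 * back to the start of the list (the 0x01 bit marks it as a chain
 * pointer, much as sg_chain() would). The temporary list is only
 * needed while the LLIs are generated, so it is freed again before
 * returning.
 */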

static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		      size_t buf_len, size_t period_len,
		      enum dma_data_direction direction)
{
	unsigned int periods = buf_len / period_len;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;

	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = dma_addr;
		sg_dma_len(&sg[i]) = period_len;
		dma_addr += period_len;
	}

	sg[periods].offset = 0;
	sg[periods].length = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);

	kfree(sg);

	return txd;
}

static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		return -EINVAL;
	}

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}

static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);

	/* Busy means that queued jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

/* Runtime reconfiguration extension */
static void d40_set_runtime_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth config_addr_width;
	dma_addr_t config_addr;
	u32 config_maxburst;
	enum stedma40_periph_data_width addr_width;
	int psize;

	if (config->direction == DMA_FROM_DEVICE) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		config_addr_width = config->src_addr_width;
		config_maxburst = config->src_maxburst;

	} else if (config->direction == DMA_TO_DEVICE) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		config_addr_width = config->dst_addr_width;
		config_maxburst = config->dst_maxburst;

	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return;
	}

	switch (config_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			config_addr_width);
		return;
	}

	if (chan_is_logical(d40c)) {
		if (config_maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (config_maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (config_maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (config_maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (config_maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (config_maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else if (config_maxburst >= 2)
			psize = STEDMA40_PSIZE_PHY_2;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}

	/* Set up all the endpoint configs */
	cfg->src_info.data_width = addr_width;
	cfg->src_info.psize = psize;
	cfg->src_info.big_endian = false;
	cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
	cfg->dst_info.data_width = addr_width;
	cfg->dst_info.psize = psize;
	cfg->dst_info.big_endian = false;
	cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	/* Fill in register values */
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg,
			    &d40c->dst_def_cfg, false);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d, "
		"maxburst %d bytes, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		config_addr_width,
		config_maxburst);
}
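
/*
 * Dispatch the generic dmaengine control operations onto the driver
 * primitives; anything beyond terminate/pause/resume/slave-config is
 * unsupported and reported as -ENXIO.
 */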

static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return d40_terminate_all(d40c);
	case DMA_PAUSE:
		return d40_pause(d40c);
	case DMA_RESUME:
		return d40_resume(d40c);
	case DMA_SLAVE_CONFIG:
		d40_set_runtime_config(chan,
				       (struct dma_slave_config *) arg);
		return 0;
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}

/* Initialization functions */
static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->pending_queue);
		INIT_LIST_HEAD(&d40c->client);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}

static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
		dev->device_prep_slave_sg = d40_prep_slave_sg;

	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
		dev->device_prep_dma_memcpy = d40_prep_memcpy;

		/*
		 * This controller can only access addresses at even
		 * 32-bit boundaries, i.e. with 2^2-byte alignment.
		 */
		dev->copy_align = 2;
	}

	if (dma_has_cap(DMA_SG, dev->cap_mask))
		dev->device_prep_dma_sg = d40_prep_memcpy_sg;

	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;

	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
	dev->device_free_chan_resources = d40_free_chan_resources;
	dev->device_issue_pending = d40_issue_pending;
	dev->device_tx_status = d40_tx_status;
	dev->device_control = d40_control;
	dev->dev = base->dev;
}

static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

	d40_ops_init(base, &base->dma_slave);

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		d40_err(base->dev, "Failed to register slave channels\n");
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	d40_ops_init(base, &base->dma_memcpy);

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
	/* Set DMA_CYCLIC on dma_both, not dma_slave (copy-paste slip) */
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);

	d40_ops_init(base, &base->dma_both);
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}

/* Initialization functions. */
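
/*
 * The PRSME/PRSMO registers appear to hold a 2-bit mode field per
 * physical channel (even-numbered channels in PRSME, odd in PRSMO);
 * a field value of 1 marks a secure-mode-only channel, which this
 * driver treats as permanently occupied (an inference from the decode
 * loop below).
 */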

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		num_phy_chans_avail--;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {
		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}

static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 Depends on HW revision:
		 * DB8500ed has 0x0008,
		 * ? has 0x0018,
		 * DB8500v1 has 0x0028
		 * DB8500v2 has 0x0038
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 val;
	u32 rev;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		d40_err(&pdev->dev, "No matching clock found\n");
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			d40_err(&pdev->dev,
				"Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	/* Get silicon revision and designer */
	val = readl(virtbase + D40_DREG_PERIPHID2);

	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
	    D40_HW_DESIGNER) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			val & D40_DREG_PERIPHID2_DESIGNER_MASK,
			D40_HW_DESIGNER);
		goto failure;
	}

	rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
		D40_DREG_PERIPHID2_REV_POS;

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 rev, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		d40_err(&pdev->dev, "Out of memory\n");
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels is the number of
		 * event lines for all src and dst devices.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
					    sizeof(struct d40_desc *) *
					    D40_LCLA_LINK_PER_EVENT_GRP,
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!IS_ERR(clk)) {
		clk_disable(clk);
		clk_put(clk);
	}
	/* Unmap once only; the original error path did this twice */
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));
	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
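
/*
 * One-time hardware setup: unmask and clear interrupts on all logical
 * channels, then walk the physical channels backwards to accumulate
 * the per-parity PRMSE/PRMSO mode and ACTIVE/ACTIVO shadow values,
 * leaving the reserved (secure) channels alone.
 */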

static void __init d40_hw_init(struct d40_base *base)
{
	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}
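
/*
 * LCLA (logical channel link address) memory must be 18-bit (256 kB,
 * LCLA_ALIGNMENT) aligned; the allocation strategy for getting such a
 * block without wasting memory is described in the comment inside the
 * function.
 */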

static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
	 * To fulfill this hardware requirement without wasting 256 kB
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculate how many pages are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success with finding the correct
		 * alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
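
/*
 * Probe: detect the hardware, map the LCPA (logical channel parameter
 * address) region, allocate the LCLA memory, hook up the interrupt and
 * register the three dmaengine devices before the final register init.
 */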

static int __init d40_probe(struct platform_device *pdev)
{
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}

	ret = d40_lcla_allocate(base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	/* Propagate the real error code rather than the initial -ENOENT */
	ret = d40_dmaengine_init(base, num_reserved_chans);
	if (ret)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);
		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);
		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);