/*
 * drivers/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

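/*
 * Example (illustrative): each 32-bit status register packs sixteen
 * channels at 2-bit intervals, and even/odd physical channels live in
 * separate registers (ACTIVE vs ACTIVO, see d40_channel_execute_command),
 * so the position collapses channel pairs. For physical channel 5:
 *
 *	D40_CHAN_POS(5)      == 4
 *	D40_CHAN_POS_MASK(5) == (0x3 << 4)
 *
 *	status = (readl(active_reg) & D40_CHAN_POS_MASK(5)) >>
 *		 D40_CHAN_POS(5);
 */
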
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/*
 * The number of free d40_desc to keep in memory before starting
 * to kfree() them.
 */
#define D40_DESC_CACHE_SIZE 50

/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of LLIs in lli_pool.
 * @lli_tcount: Number of LLIs processed in the transfer. When it equals
 * lli_len, this transfer job is done.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	u32				 lli_len;
	u32				 lli_tcount;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	enum dma_data_direction		 dir;
	bool				 is_in_client_list;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA.
 * @phy: Physical base address of LCLA.
 * @base_size: The size of the LCLA area.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries in alloc_map. Equal to the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	 phy;
	resource_size_t	 base_size;
	spinlock_t	 lock;
	u32		*alloc_map;
	int		 num_blocks;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line numbers. allocated_src and allocated_dst can not both be
 * physically allocated to a channel, since the interrupt handler would
 * then have no way of figuring out which one the interrupt belongs to.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical channel number, if any, of this channel.
 * @completed: Starts with 1, after the first interrupt it is set to the
 * dma engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when a transfer is ongoing on this channel.
 * @phy_chan: Pointer to the physical channel which this instance runs on.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call the client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptors.
 * @queue: Queued jobs.
 * @free: List of free descriptors, ready to be reused.
 * @free_len: Number of descriptors in the free list.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;

	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct list_head		 free;
	int				 free_len;
	struct stedma40_chan_cfg	 dma_cfg;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_lcla_elem		 lcla;
	struct d40_log_lli_full		*lcpa;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in the system.
 * @log_chans: Room for all possible logical channels in the system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);

		d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
		d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
	}

	return 0;
}

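/*
 * Usage sketch (illustrative): the common 1 src + 1 dst case is served
 * from the descriptor's embedded pre_alloc_lli buffer and never touches
 * kmalloc(); longer chains allocate from the heap. After
 *
 *	if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0)
 *		return -ENOMEM;
 *
 * lli_log.src and lli_log.dst point into the pool, and a later
 * d40_pool_lli_free(d40d) reduces to kfree(NULL) on the fast path.
 */
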
static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
	d40d->lli_phy.src_addr = 0;
	d40d->lli_phy.dst_addr = 0;
}

static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
				      struct d40_desc *desc)
{
	dma_cookie_t cookie = d40c->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	d40c->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

static void d40_desc_reset(struct d40_desc *d40d)
{
	d40d->lli_tcount = 0;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc;
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				desc = d;
				goto out;
			}
	}

	if (list_empty(&d40c->free)) {
		/* Alloc new desc because we're out of used ones */
		desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
		if (desc == NULL)
			goto out;
		INIT_LIST_HEAD(&desc->node);
	} else {
		/* Reuse an old desc. */
		desc = list_first_entry(&d40c->free,
					struct d40_desc,
					node);
		list_del(&desc->node);
		d40c->free_len--;
	}
out:
	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40c->free_len < D40_DESC_CACHE_SIZE) {
		list_add_tail(&d40d->node, &d40c->free);
		d40c->free_len++;
	} else
		kfree(d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

/* Support functions for logical channels */

static int d40_lcla_id_get(struct d40_chan *d40c,
			   struct d40_lcla_pool *pool)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		pool->base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (pool->num_blocks > 32)
		return -EINVAL;

	spin_lock(&pool->lock);

	for (i = 0; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= pool->num_blocks)
		goto err;

	for (; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock(&pool->lock);
	return 0;
err:
	spin_unlock(&pool->lock);
	return -EINVAL;
}

static void d40_lcla_id_put(struct d40_chan *d40c,
			    struct d40_lcla_pool *pool,
			    int id)
{
	if (id < 0)
		return;

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_lock(&pool->lock);
	pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
	spin_unlock(&pool->lock);
}

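/*
 * Illustrative example: each physical channel owns one 32-bit word in
 * pool->alloc_map, one bit per LCLA block, which is why pools with more
 * than 32 blocks are rejected above. A src/dst pair is claimed by
 * setting the two lowest clear bits; with blocks 0 and 2 already taken:
 *
 *	alloc_map[phy] == 0x5 before d40_lcla_id_get()
 *	src_id == 1, dst_id == 3, alloc_map[phy] == 0xf after
 */
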
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	int status, i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {
		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}
	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			/* Return desc to free-list */
			d40_desc_free(d40c, d);
		}

	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.src_id);
	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.dst_id);

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val = 0;

	/* If SSLNK or SDLNK is zero all events are disabled */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SDLNK);
	return val;
}

static void d40_config_enable_lidx(struct d40_chan *d40c)
{
	/* Set LIDX for lcla */
	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);

	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
}

static int d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;
	int res;

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res)
		return res;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		d40_config_enable_lidx(d40c);
	}
	return res;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
		d40d->lli_tcount = d40d->lli_len;
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		u32 lli_len;
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;

		src += d40d->lli_tcount;
		dst += d40d->lli_tcount;

		if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
			lli_len = d40d->lli_len;
		else
			lli_len = d40c->base->plat_data->llis_per_log;
		d40d->lli_tcount += lli_len;
		d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
				  d40c->lcla.dst,
				  dst, src,
				  d40c->base->plat_data->llis_per_log);
	}
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	tx->cookie = d40_assign_cookie(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	int err;

	if (d40c->log_num != D40_PHY_CHAN) {
		err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
		if (err)
			return err;
		d40_config_set_event(d40c, true);
	}

	err = d40_channel_execute_command(d40c, D40_DMA_RUN);

	return err;
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* Called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	if (!d40c->phy_chan)
		return;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_tcount < d40d->lli_len) {
		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d_fin = d40_first_active_get(d40c);

	if (d40d_fin == NULL)
		goto err;

	d40c->completed = d40d_fin->txd.cookie;

	/*
	 * If terminating a channel, pending_tx is set to zero.
	 * This prevents any finished active jobs from being returned
	 * to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d_fin->txd.callback;
	callback_param = d40d_fin->txd.callback_param;

	if (async_tx_test_ack(&d40d_fin->txd)) {
		d40_pool_lli_free(d40d_fin);
		d40_desc_remove(d40d_fin);
		/* Return desc to free-list */
		d40_desc_free(d40c, d40d_fin);
	} else {
		d40_desc_reset(d40d_fin);
		if (!d40d_fin->is_in_client_list) {
			d40_desc_remove(d40d_fin);
			list_add_tail(&d40d_fin->node, &d40c->client);
			d40d_fin->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback)
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 tmp;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {
		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		tmp = readl(base->virtbase + il[row].clr);
		tmp |= 1 << idx;
		writel(tmp, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	/*
	 * Validate the configuration being passed in, not the (possibly
	 * stale) one already stored in d40c->dma_cfg.
	 */
	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

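/*
 * Illustrative example: allocated_src/allocated_dst encode both the
 * allocation type and, for logical channels, the event lines in use.
 * Claiming event line 3 as src on a free physical channel steps through:
 *
 *	allocated_src == D40_ALLOC_FREE      (1 << 31)
 *	allocated_src  = D40_ALLOC_LOG_FREE; (0)
 *	allocated_src |= 1 << 3;
 *
 * The matching d40_alloc_mask_free(phy, true, 3) below clears the bit
 * and, once no event lines remain, restores D40_ALLOC_FREE.
 */
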
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type &
		       STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0, is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical
		 * channels rather than packing every logical channel onto
		 * the first available phy channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}

static int d40_config_chan(struct d40_chan *d40c,
			   struct stedma40_chan_cfg *info)
{
	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * 32;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type * 32 + 16;
	}

	/* Write channel configuration to the DMA */
	return d40_config_write(d40c);
}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event, dir;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
			__func__);
		return res;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		dir = D40_CHAN_REG_SDLNK;
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		dir = D40_CHAN_REG_SSLNK;
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/*
		 * Release logical channel, deactivate the event line during
		 * the time the physical res is suspended.
		 */
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
		       D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       phy->num * D40_DREG_PCDELTA + dir);

		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else
		d40_alloc_mask_free(phy, is_src, 0);

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;
	int res;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res != 0)
		goto _exit;

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
	else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}
	status = d40_chan_has_events(d40c);
	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;

	/* Resume the other logical channels if any */
	if (d40_chan_has_events(d40c))
		res = d40_channel_execute_command(d40c,
						  D40_DMA_RUN);
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

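/*
 * Worked example (illustrative; assumes the STEDMA40_*_WIDTH encoding
 * where data_width is the log2 of the element size in bytes): with
 * ECNT == 4 elements left and 32-bit elements (data_width == 2), the
 * residue is 4 * (1 << 2) == 16 bytes.
 */
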
static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
		if (res)
			goto out;

		/* If bytes left to transfer or linked tx resume job */
		if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
			d40_config_set_event(d40c, true);
			res = d40_channel_execute_command(d40c, D40_DMA_RUN);
		}
	} else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

/* Public DMA functions in addition to the DMA engine framework */

int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);

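/*
 * Usage sketch (illustrative; psize constants from <plat/ste_dma40.h>):
 * a client that wants bursts of 8 elements on both sides of a physical
 * channel would call:
 *
 *	stedma40_set_psize(chan, STEDMA40_PSIZE_PHY_8,
 *			   STEDMA40_PSIZE_PHY_8);
 *
 * Logical channels take the STEDMA40_PSIZE_LOG_* values instead.
 */
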
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;
	int lli_max = d40c->base->plat_data->llis_per_log;

	spin_lock_irqsave(&d40c->lock, flg);

	d40d = d40_desc_get(d40c);
	if (d40d == NULL)
		goto err;

	memset(d40d, 0, sizeof(struct d40_desc));
	d40d->lli_len = sgl_len;
	d40d->txd.flags = flags;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (sgl_len > 1)
			/*
			 * Check if there is space available in lcla. If not,
			 * split list into 1-length and run only in lcpa
			 * space.
			 */
			if (d40_lcla_id_get(d40c,
					    &d40c->base->lcla_pool) != 0)
				lli_max = 1;

		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
					 sgl_src,
					 sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 flags & DMA_PREP_INTERRUPT, lli_max,
					 d40c->base->plat_data->llis_per_log);

		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
					 sgl_dst,
					 sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 flags & DMA_PREP_INTERRUPT, lli_max,
					 d40c->base->plat_data->llis_per_log);
	} else {
		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		res = d40_phy_sg_to_lli(sgl_src,
					sgl_len,
					0,
					d40d->lli_phy.src,
					d40d->lli_phy.src_addr,
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.src_info.psize,
					true);
		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst,
					sgl_len,
					0,
					d40d->lli_phy.dst,
					d40d->lli_phy.dst_addr,
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.dst_info.psize,
					true);
		if (res < 0)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flg);

	return &d40d->txd;
err:
	spin_unlock_irqrestore(&d40c->lock, flg);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);

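/*
 * Filter function for dma_request_channel(). With a channel
 * configuration in @data the channel is validated and configured;
 * with NULL data it is set up for memcpy. Returns true on success.
 */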
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);

/* DMA ENGINE functions */

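/*
 * Allocate a physical or logical channel and apply the current
 * configuration, falling back to the memcpy configuration when no
 * channel_type has been set.
 */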
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/*
	 * If no dma configuration is set (channel_type == 0)
	 * use default configuration
	 */
	if (d40c->dma_cfg.channel_type == 0) {
		err = d40_config_memcpy(d40c);
		if (err)
			goto err_alloc;
	}

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto err_alloc;
	}

	err = d40_config_chan(d40c, &d40c->dma_cfg);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to configure channel\n",
			__func__);
		goto err_config;
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;

err_config:
	(void) d40_free_dma(d40c);
err_alloc:
	spin_unlock_irqrestore(&d40c->lock, flags);
	dev_err(&d40c->chan.dev->device,
		"[%s] Channel allocation failed\n", __func__);
	return -EINVAL;
}

static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);
	if (err)
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to free channel\n", __func__);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

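/*
 * Prepare a single-block memcpy: one LLI pair (src/dst), filled in
 * logical or physical form depending on the channel.
 */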
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
							dma_addr_t dst,
							dma_addr_t src,
							size_t size,
							unsigned long flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;
	int err = 0;

	spin_lock_irqsave(&d40c->lock, flg);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	memset(d40d, 0, sizeof(struct d40_desc));

	d40d->txd.flags = flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_len = 1;

		d40_log_fill_lli(d40d->lli_log.src,
				 src,
				 size,
				 0,
				 d40c->log_def.lcsp1,
				 d40c->dma_cfg.src_info.data_width,
				 true, true);

		d40_log_fill_lli(d40d->lli_log.dst,
				 dst,
				 size,
				 0,
				 d40c->log_def.lcsp3,
				 d40c->dma_cfg.dst_info.data_width,
				 true, true);
	} else {
		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		err = d40_phy_fill_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		err = d40_phy_fill_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flg);
	return &d40d->txd;

err_fill_lli:
	dev_err(&d40c->chan.dev->device,
		"[%s] Failed filling in PHY LLI\n", __func__);
	d40_pool_lli_free(d40d);
err:
	spin_unlock_irqrestore(&d40c->lock, flg);
	return NULL;
}

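/*
 * Build logical channel LLIs for a slave transfer. The device address
 * comes from the platform data rx/tx tables depending on direction;
 * as for memcpy, the list is split into single elements when no lcla
 * entry is available.
 */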
static int d40_prep_slave_sg_log(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sg_len,
				 enum dma_data_direction direction,
				 unsigned long flags)
{
	dma_addr_t dev_addr = 0;
	int total_size;
	int lli_max = d40c->base->plat_data->llis_per_log;

	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sg_len;
	d40d->lli_tcount = 0;

	if (sg_len > 1)
		/*
		 * Check if there is space available in lcla.
		 * If not, split list into 1-length and run only
		 * in lcpa space.
		 */
		if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
			lli_max = 1;

	if (direction == DMA_FROM_DEVICE) {
		dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
		total_size = d40_log_sg_to_dev(&d40c->lcla,
					       sgl, sg_len,
					       &d40d->lli_log,
					       &d40c->log_def,
					       d40c->dma_cfg.src_info.data_width,
					       d40c->dma_cfg.dst_info.data_width,
					       direction,
					       flags & DMA_PREP_INTERRUPT,
					       dev_addr, lli_max,
					       d40c->base->plat_data->llis_per_log);
	} else if (direction == DMA_TO_DEVICE) {
		dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
		total_size = d40_log_sg_to_dev(&d40c->lcla,
					       sgl, sg_len,
					       &d40d->lli_log,
					       &d40c->log_def,
					       d40c->dma_cfg.src_info.data_width,
					       d40c->dma_cfg.dst_info.data_width,
					       direction,
					       flags & DMA_PREP_INTERRUPT,
					       dev_addr, lli_max,
					       d40c->base->plat_data->llis_per_log);
	} else
		return -EINVAL;

	if (total_size < 0)
		return -EINVAL;

	return 0;
}

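/*
 * Build physical channel LLIs for a slave transfer. The fixed device
 * address goes on the src side for DMA_FROM_DEVICE and the dst side
 * for DMA_TO_DEVICE; the finished LLI table is mapped for the device.
 */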
static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sgl_len,
				 enum dma_data_direction direction,
				 unsigned long flags)
{
	dma_addr_t src_dev_addr;
	dma_addr_t dst_dev_addr;
	int res;

	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sgl_len;
	d40d->lli_tcount = 0;

	if (direction == DMA_FROM_DEVICE) {
		dst_dev_addr = 0;
		src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	} else if (direction == DMA_TO_DEVICE) {
		dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
		src_dev_addr = 0;
	} else
		return -EINVAL;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				src_dev_addr,
				d40d->lli_phy.src,
				d40d->lli_phy.src_addr,
				d40c->src_def_cfg,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.src_info.psize,
				true);
	if (res < 0)
		return res;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				dst_dev_addr,
				d40d->lli_phy.dst,
				d40d->lli_phy.dst_addr,
				d40c->dst_def_cfg,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.dst_info.psize,
				true);
	if (res < 0)
		return res;

	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
			      d40d->lli_pool.size, DMA_TO_DEVICE);
	return 0;
}

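/*
 * device_prep_slave_sg hook: run the optional pre_transfer callback,
 * grab a descriptor and hand over to the logical or physical LLI
 * builder.
 */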
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							  struct scatterlist *sgl,
							  unsigned int sg_len,
							  enum dma_data_direction direction,
							  unsigned long flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;
	int err;

	if (d40c->dma_cfg.pre_transfer)
		d40c->dma_cfg.pre_transfer(chan,
					   d40c->dma_cfg.pre_transfer_data,
					   sg_dma_len(sgl));

	spin_lock_irqsave(&d40c->lock, flg);
	d40d = d40_desc_get(d40c);
	spin_unlock_irqrestore(&d40c->lock, flg);

	if (d40d == NULL)
		return NULL;

	memset(d40d, 0, sizeof(struct d40_desc));

	if (d40c->log_num != D40_PHY_CHAN)
		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
					    direction, flags);
	else
		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
					    direction, flags);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to prepare %s slave sg job: %d\n",
			__func__,
			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
		return NULL;
	}

	d40d->txd.flags = flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	return &d40d->txd;
}

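/*
 * device_tx_status hook: report DMA_PAUSED when the channel is
 * suspended, otherwise the cookie-based completion state, and fill in
 * the residue in bytes.
 */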
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}

static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

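/*
 * device_control hook: implements DMA_TERMINATE_ALL, DMA_PAUSE and
 * DMA_RESUME; all other commands are unimplemented and return -ENXIO.
 */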
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&d40c->lock, flags);
		d40_term_all(d40c);
		spin_unlock_irqrestore(&d40c->lock, flags);
		return 0;
	case DMA_PAUSE:
		return d40_pause(chan);
	case DMA_RESUME:
		return d40_resume(chan);
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}

/* Initialization functions */

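/*
 * Set up the d40_chan structures for one dma_device: back pointers,
 * invalidated lcla entries, locks, job lists, the per-channel tasklet
 * and the dma_device channel list.
 */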
static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		/* Invalidate lcla element */
		d40c->lcla.src_id = -1;
		d40c->lcla.dst_id = -1;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->free);
		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->client);

		d40c->free_len = 0;

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}

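/*
 * Register three dma_device instances: slave-only logical channels,
 * memcpy-only logical channels, and the reserved physical channels,
 * which are both slave and memcpy capable.
 */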
static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_slave.device_tx_status = d40_tx_status;
	base->dma_slave.device_issue_pending = d40_issue_pending;
	base->dma_slave.device_control = d40_control;
	base->dma_slave.dev = base->dev;

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register slave channels\n",
			__func__);
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_memcpy.device_tx_status = d40_tx_status;
	base->dma_memcpy.device_issue_pending = d40_issue_pending;
	base->dma_memcpy.device_control = d40_control;
	base->dma_memcpy.dev = base->dev;
	/*
	 * This controller can only access addresses at even
	 * 32bit boundaries, i.e. 2^2
	 */
	base->dma_memcpy.copy_align = 2;

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register memcpy only channels\n",
			__func__);
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);

	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_both.device_tx_status = d40_tx_status;
	base->dma_both.device_issue_pending = d40_issue_pending;
	base->dma_both.device_control = d40_control;
	base->dma_both.dev = base->dev;
	base->dma_both.copy_align = 2;

	err = dma_async_device_register(&base->dma_both);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register logical and physical capable channels\n",
			__func__);
		goto failure3;
	}
	return 0;

failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}

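/*
 * Mark secure-mode-only physical channels as occupied based on the
 * PRSME/PRSMO registers (two bits per channel), count what remains
 * available, and sanity-check the extended/standard mode bits in
 * PRTYP.
 */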
static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}
	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {
		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);
		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}

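/*
 * Detect the controller and build the driver state: verify the
 * peripheral and PrimeCell IDs, read the number of physical channels
 * from ICFG, count the logical channels used by the platform data,
 * and allocate the d40_base together with its channel and lookup
 * tables.
 */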
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 Depends on HW revision:
		 * MOP500/HREF ED has 0x0008,
		 * ? has 0x0018,
		 * HREF V1 has 0x0028
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			dev_err(&pdev->dev,
				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				__func__,
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	i = readl(virtbase + D40_DREG_PERIPHID2);

	if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
		dev_err(&pdev->dev,
			"[%s] Unknown designer! Got %x wanted %x\n",
			__func__, i & 0xf, D40_PERIPHID2_DESIGNER);
		goto failure;
	}

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 (i >> 4) & 0xf, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
		goto failure;
	}

	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels are event lines for all
		 * src devices and dst devices
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	return base;

failure:
	if (clk) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}

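/*
 * Program the hardware defaults: clock the whole block, unmask and
 * clear all logical channel interrupts, and put every non-secure
 * channel into physical mode with its interrupt enabled.
 */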
static void __init d40_hw_init(struct d40_base *base)
{
	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {
		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}

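/*
 * Probe: detect the hardware, map and program the LCPA and LCLA
 * regions (placed in ESRAM), hook up the interrupt and register the
 * DMA engine devices. The failure path unwinds everything set up so
 * far.
 */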
static int __init d40_probe(struct platform_device *pdev)
{
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcpa\" memory resource\n",
			__func__);
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCPA region\n",
			__func__);
		goto failure;
	}
	/* Get IO for logical channel link address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcla\" resource defined\n",
			__func__);
		goto failure;
	}

	base->lcla_pool.base_size = resource_size(res);
	base->lcla_pool.phy = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcla") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCLA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}
	val = readl(base->virtbase + D40_DREG_LCLA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	base->lcla_pool.base = ioremap(res->start, resource_size(res));
	if (!base->lcla_pool.base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->lcla_pool.num_blocks = base->num_phy_chans;

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);

	if (ret) {
		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
		goto failure;
	}

	ret = d40_dmaengine_init(base, num_reserved_chans);
	if (ret)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		/* Unmap everything ioremapped above */
		if (base->lcla_pool.base)
			iounmap(base->lcla_pool.base);
		if (base->lcpa_base)
			iounmap(base->lcpa_base);
		if (base->virtbase)
			iounmap(base->virtbase);
		if (base->lcla_pool.phy)
			release_mem_region(base->lcla_pool.phy,
					   base->lcla_pool.base_size);
		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
	return ret;
}

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

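/*
 * The controller is not hot-pluggable, so platform_driver_probe() is
 * used rather than a .probe member; arch_initcall() registers the
 * driver early so clients can find their channels during their own
 * init.
 */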
int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);