/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/amba/bus.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)	(2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan)	(0x3 << D40_CHAN_POS(chan))
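
/*
 * Example: physical channels 4 and 5 both get bit position 4, so
 * D40_CHAN_POS(4) == D40_CHAN_POS(5) == 4 and the mask is 0x30; even and
 * odd channels are told apart by which of the two shared registers
 * (ACTIVE/ACTIVO) is addressed.
 */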
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0
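
/*
 * In the allocation map (allocated_src/allocated_dst in struct d40_phy_res
 * below), bit 31 set means the half channel is completely free, bit 30 set
 * means it is claimed as a physical channel, and a value with neither bit
 * set is a bitmask of the logical event lines currently allocated on it
 * (all zero meaning "free for logical use").
 */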
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void		*base;
	int		 size;
	dma_addr_t	 dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8		 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 cyclic;
};
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		 *base;
	dma_addr_t	  dma_addr;
	void		 *base_unaligned;
	int		  pages;
	spinlock_t	  lock;
	struct d40_desc	**alloc_map;
};
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};
struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending().
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 pending_queue;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			 runtime_addr;
	enum dma_data_direction		 runtime_direction;
};
/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};
static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}
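
/*
 * Each physical channel owns a bank of channel registers at a fixed stride
 * from the controller base: register x of channel n lives at
 * virtbase + D40_DREG_PCBASE + n * D40_DREG_PCDELTA + x.
 */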
static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)		\
	d40_err(chan2dev(d40c), format, ## arg)
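
/*
 * An LLI pool holds the link-list items for one job. The common case of a
 * single src/dst pair fits in the descriptor's own pre_alloc_lli buffer;
 * longer jobs kmalloc() space for lli_len entries each for src and dst,
 * plus one entry's worth of alignment slack. Physical channel LLIs are
 * fetched by the hardware itself and therefore also need to be DMA mapped.
 */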
static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}
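
/*
 * LCLA entries are handed out per physical channel: channel n owns slots
 * [n * D40_LCLA_LINK_PER_EVENT_GRP, (n + 1) * D40_LCLA_LINK_PER_EVENT_GRP)
 * in alloc_map, and a slot is marked busy by storing the owning descriptor
 * pointer there. Only the first half of the range is scanned, since each
 * grabbed index serves a src/dst pair of links.
 */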
static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;
	int p;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

	/*
	 * Allocate both src and dst at the same time; therefore the half
	 * starts at 1, since 0 can't be used as it is the end marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
					D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
					D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}
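
/*
 * Logical channels run their first link directly from LCPA; any following
 * links are written to this channel's LCLA slots and chained together, so
 * the hardware can walk the whole list without CPU intervention. Cyclic
 * jobs additionally try to link the last LCLA entry back to the first one.
 */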
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space.
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		curr_lcla = d40_lcla_alloc_one(chan, desc);
		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need
	 * to link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		dma_sync_single_range_for_device(chan->base->dev,
						 pool->dma_addr, lcla_offset,
						 2 * sizeof(struct d40_log_lli),
						 DMA_TO_DEVICE);

		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}
/*
 * The dma only supports transmitting packets up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
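/*
 * Worked example (assuming STEDMA40_MAX_SEG_SIZE is 0xFFFF elements): for
 * a 32-bit source and an 8-bit destination, max_w is 2 and min_w is 0, so
 * seg_max becomes ALIGN(0xFFFF, 4) - 4 = 65532 bytes, and a 200000 byte
 * buffer needs DIV_ROUND_UP(200000, 65532) = 4 dma elements.
 */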
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}
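
/*
 * Channel commands are issued by read-modify-writing the 2-bit field for
 * the physical channel in the shared ACTIVE (even channels) or ACTIVO
 * (odd channels) register; execmd_lock serialises this across channels.
 */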
/* Support functions for logical channels */

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to suspend the chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
				   u32 event, int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;

	if (!enable) {
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		return;
	}

	/*
	 * The hardware sometimes doesn't register the enable when src and dst
	 * event lines are active on the same logical channel. Retry to ensure
	 * it does. Usually only one retry is sufficient.
	 */
	tries = 100;
	while (--tries) {
		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		if (readl(addr) & D40_EVENTLINE_MASK(event))
			break;
	}

	if (tries != 99)
		dev_dbg(chan2dev(d40c),
			"[%s] workaround enable S%cLNK (%d tries)\n",
			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
			100 - tries);

	WARN_ON(!tries);
}
static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	unsigned long flags;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SSLNK);
	}

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}
static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
	}
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}
static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (chan_is_logical(d40c)) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (chan_is_logical(d40c)) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {

		if (chan_is_logical(d40c))
			d40_config_set_event(d40c, true);

		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_terminate_all(struct d40_chan *chan)
{
	unsigned long flags;
	int ret = 0;

	ret = d40_pause(chan);
	if (!ret && chan_is_physical(chan))
		ret = d40_channel_execute_command(chan, D40_DMA_STOP);

	spin_lock_irqsave(&chan->lock, flags);
	d40_term_all(chan);
	spin_unlock_irqrestore(&chan->lock, flags);

	return ret;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->chan.cookie++;

	if (d40c->chan.cookie < 0)
		d40c->chan.cookie = 1;

	d40d->txd.cookie = d40c->chan.cookie;

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (chan_is_logical(d40c)) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (chan_is_logical(d40c))
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}
static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}
/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload it,
		 * and only once the whole list has completed. We need to
		 * check for done because the interrupt will hit for every
		 * link, and not just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL)
			d40c->busy = false;
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;

	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);
	if (d40d == NULL)
		goto err;

	if (!d40d->cyclic)
		d40c->completed = d40d->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the
	 * client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else {
			if (!d40d->is_in_client_list) {
				d40_desc_remove(d40d);
				d40_lcla_free_all(d40c, d40d);
				list_add_tail(&d40d->node, &d40c->client);
				d40d->is_in_client_list = true;
			}
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
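
/*
 * The interrupt handler demultiplexes one status register at a time: each
 * il[] row covers 32 event lines (or the physical channels, for the
 * PCTIS/PCEIS rows), set bits are found with find_next_bit() over the
 * copied registers, acknowledged in the matching clear register, and then
 * dispatched to the owning channel via the lookup tables.
 */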
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid TX channel address (%d)\n",
			 conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid RX channel address (%d)\n",
			 conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		chan_err(d40c, "Invalid dst\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		chan_err(d40c, "Invalid src\n");
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		chan_err(d40c, "No event line\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		chan_err(d40c, "Invalid event group\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}
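
/*
 * Try to claim (part of) a physical half channel. A physical client takes
 * the whole src+dst pair; a logical client only sets its event line bit in
 * the relevant half, so several logical channels can share one physical
 * channel as long as no physical client owns it.
 */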
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);
	return is_free;
}
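
/*
 * Each event group maps to a pair of physical channels at a fixed stride
 * (phy 2 * group and 2 * group + 1 within every bank of eight). Logical
 * src channels are allocated upwards and dst channels downwards within the
 * pair, spreading the load instead of piling onto one physical channel.
 */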
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical
		 * channels rather than packing every logical channel onto
		 * the first available one.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}
static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		chan_err(d40c, "suspend failed\n");
		return res;
	}

	if (chan_is_logical(d40c)) {
		/* Release logical channel, deactivate the event line */
		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					chan_err(d40c,
						 "Executing RUN command\n");
					return res;
				}
			}
			return 0;
		}
	} else {
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "Failed to stop channel\n");
		return res;
	}
	d40c->phy_chan = NULL;
	d40c->configured = false;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}
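/*
 * Note on the register layout used below: physical channel state is packed
 * two bits per channel, with even-numbered channels in ACTIVE and
 * odd-numbered channels in ACTIVO, hence the phy_chan->num % 2 selection.
 */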
static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		 D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}
static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}
static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);
	/* Don't let a failed src conversion be masked by the dst one */
	if (ret < 0)
		return ret;

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}
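/*
 * The logical path above only fills in the lightweight LCSP1/LCSP3 based
 * link items; the physical path below builds full hardware LLIs and must
 * therefore sync the LLI pool to the device once both directions are set up.
 */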
static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);
	/* As above, don't mask an error from the src side */
	if (ret < 0)
		return ret;

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}
static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}
static dma_addr_t
d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
{
	struct stedma40_platform_data *plat = chan->base->plat_data;
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	dma_addr_t addr = 0;

	if (chan->runtime_addr)
		return chan->runtime_addr;

	if (direction == DMA_FROM_DEVICE)
		addr = plat->dev_rx[cfg->src_dev_type];
	else if (direction == DMA_TO_DEVICE)
		addr = plat->dev_tx[cfg->dst_dev_type];

	return addr;
}
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_data_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr = 0;
	dma_addr_t dst_dev_addr = 0;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto err;

	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
		desc->cyclic = true;

	if (direction != DMA_NONE) {
		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);

		if (direction == DMA_FROM_DEVICE)
			src_dev_addr = dev_addr;
		else if (direction == DMA_TO_DEVICE)
			dst_dev_addr = dev_addr;
	}

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto err;
	}

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;

err:
	if (desc)
		d40_desc_free(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
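/*
 * Usage sketch (not taken from a real board file): a client typically hands
 * a filled-in struct stedma40_chan_cfg to dma_request_channel() and lets
 * stedma40_filter() claim a matching channel. The event line number is
 * board specific and only a placeholder here:
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_PERIPH_TO_MEM,
 *		.src_dev_type = <board specific event line>,
 *		.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
 *		.mode = STEDMA40_MODE_LOGICAL,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */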
static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
	u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = 1 << event;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}
static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);

	if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
}
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			chan_err(d40c, "Failed to configure memcpy channel\n");
			goto fail;
		}
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		chan_err(d40c, "Failed to allocate channel\n");
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, chan_is_logical(d40c));

	d40_set_prio_realtime(d40c);

	if (chan_is_logical(d40c)) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = size;
	sg_dma_len(&src_sg) = size;

	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}
static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
		   struct scatterlist *dst_sg, unsigned int dst_nents,
		   struct scatterlist *src_sg, unsigned int src_nents,
		   unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_data_direction direction,
							 unsigned long dma_flags)
{
	if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
		return NULL;

	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		      size_t buf_len, size_t period_len,
		      enum dma_data_direction direction)
{
	unsigned int periods = buf_len / period_len;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
	if (!sg)
		return NULL;

	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = dma_addr;
		sg_dma_len(&sg[i]) = period_len;
		dma_addr += period_len;
	}

	/*
	 * Chain the extra entry back to the start of the list so that
	 * d40_prep_sg() detects the job as cyclic: sg_next() of the last
	 * real entry points at sg[0] again.
	 */
	sg[periods].offset = 0;
	sg[periods].length = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);

	kfree(sg);

	return txd;
}
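/*
 * Usage sketch for the cyclic variant above (all values are placeholders):
 * an audio-style double-buffering client would typically do
 *
 *	txd = chan->device->device_prep_dma_cyclic(chan, buf_dma, buf_len,
 *						   period_len, DMA_TO_DEVICE);
 *
 * and receive a completion interrupt once per period, with the transfer
 * wrapping back to buf_dma after buf_len bytes.
 */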
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		return -EINVAL;
	}

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}
static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);

	/* Busy means that queued jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}
static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
			    struct stedma40_half_channel_info *info,
			    enum dma_slave_buswidth width,
			    u32 maxburst)
{
	enum stedma40_periph_data_width addr_width;
	int psize;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			width);
		return -EINVAL;
	}

	if (chan_is_logical(d40c)) {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}

	info->data_width = addr_width;
	info->psize = psize;
	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	return 0;
}
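/*
 * Worked example for the mapping above: a 4-byte bus width with a maxburst
 * of 8 on a logical channel yields data_width = STEDMA40_WORD_WIDTH and
 * psize = STEDMA40_PSIZE_LOG_8; the same request on a physical channel
 * yields STEDMA40_PSIZE_PHY_8.
 */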
/* Runtime reconfiguration extension */
static int d40_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	dma_addr_t config_addr;
	u32 src_maxburst, dst_maxburst;
	int ret;

	src_addr_width = config->src_addr_width;
	src_maxburst = config->src_maxburst;
	dst_addr_width = config->dst_addr_width;
	dst_maxburst = config->dst_maxburst;

	if (config->direction == DMA_FROM_DEVICE) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		/* Configure the memory side */
		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			dst_addr_width = src_addr_width;
		if (dst_maxburst == 0)
			dst_maxburst = src_maxburst;

	} else if (config->direction == DMA_TO_DEVICE) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		/* Configure the memory side */
		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			src_addr_width = dst_addr_width;
		if (src_maxburst == 0)
			src_maxburst = dst_maxburst;
	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return -EINVAL;
	}

	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
		dev_err(d40c->base->dev,
			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
			src_maxburst,
			src_addr_width,
			dst_maxburst,
			dst_addr_width);
		return -EINVAL;
	}

	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
					  src_addr_width,
					  src_maxburst);
	if (ret)
		return ret;

	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
					  dst_addr_width,
					  dst_maxburst);
	if (ret)
		return ret;

	/* Fill in register values */
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg,
			    &d40c->dst_def_cfg, false);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d/%d, "
		"maxburst %d/%d elements, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		src_addr_width, dst_addr_width,
		src_maxburst, dst_maxburst);

	return 0;
}
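/*
 * Usage sketch (placeholder addresses): a peripheral driver reaches the
 * function above through the DMA_SLAVE_CONFIG control command, e.g.
 *
 *	struct dma_slave_config conf = {
 *		.direction = DMA_FROM_DEVICE,
 *		.src_addr = <peripheral FIFO bus address>,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *	dmaengine_slave_config(chan, &conf);
 */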
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return d40_terminate_all(d40c);
	case DMA_PAUSE:
		return d40_pause(d40c);
	case DMA_RESUME:
		return d40_resume(d40c);
	case DMA_SLAVE_CONFIG:
		return d40_set_runtime_config(chan,
					      (struct dma_slave_config *) arg);
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}
/* Initialization functions */
static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->pending_queue);
		INIT_LIST_HEAD(&d40c->client);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}
static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
		dev->device_prep_slave_sg = d40_prep_slave_sg;

	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
		dev->device_prep_dma_memcpy = d40_prep_memcpy;

		/*
		 * This controller can only access addresses at even
		 * 32-bit boundaries, i.e. 2^2-byte aligned.
		 */
		dev->copy_align = 2;
	}

	if (dma_has_cap(DMA_SG, dev->cap_mask))
		dev->device_prep_dma_sg = d40_prep_memcpy_sg;

	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;

	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
	dev->device_free_chan_resources = d40_free_chan_resources;
	dev->device_issue_pending = d40_issue_pending;
	dev->device_tx_status = d40_tx_status;
	dev->device_control = d40_control;
	dev->dev = base->dev;
}
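/*
 * The init routine below registers three dma_device instances: dma_slave
 * (logical channels, DMA_SLAVE + DMA_CYCLIC), dma_memcpy (logical channels
 * reserved for memcpy, DMA_MEMCPY + DMA_SG) and dma_both (physical channels
 * offering all of the above capabilities).
 */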
static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

	d40_ops_init(base, &base->dma_slave);

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		d40_err(base->dev, "Failed to register slave channels\n");
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	d40_ops_init(base, &base->dma_memcpy);

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);

	d40_ops_init(base, &base->dma_both);
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		num_phy_chans_avail--;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
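/*
 * The detect routine below reads the standard AMBA PrimeCell peripheral
 * and cell ID bytes at the end of the register window (offsets -0x20 and
 * -0x10) to verify the vendor and pick up the hardware revision before
 * sizing the channel arrays.
 */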
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 pid;
	u32 cid;
	u8 rev;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		d40_err(&pdev->dev, "No matching clock found\n");
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* This is just a regular AMBA PrimeCell ID actually */
	for (pid = 0, i = 0; i < 4; i++)
		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
			& 255) << (i * 8);
	for (cid = 0, i = 0; i < 4; i++)
		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
			& 255) << (i * 8);

	if (cid != AMBA_CID) {
		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
		goto failure;
	}
	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			AMBA_MANF_BITS(pid),
			AMBA_VENDOR_ST);
		goto failure;
	}
	/*
	 * HW revision:
	 * DB8500ed has revision 0
	 * ? has revision 1
	 * DB8500v1 has revision 2
	 * DB8500v2 has revision 3
	 */
	rev = AMBA_REV_BITS(pid);

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 rev, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		d40_err(&pdev->dev, "Out of memory\n");
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels is the number of
		 * event lines for all src devices and dst devices.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
					    sizeof(struct d40_desc *) *
					    D40_LCLA_LINK_PER_EVENT_GRP,
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!IS_ERR(clk)) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));
	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
static void __init d40_hw_init(struct d40_base *base)
{
	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC,    .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}
static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
	 * To fulfill this hardware requirement without wasting 256 KiB, we
	 * allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculate how many pages are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success with finding the correct
		 * alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
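/*
 * Probe order, for orientation: detect the hardware and size the channel
 * arrays, initialize the physical resources, map the LCPA region, allocate
 * the 256 KiB aligned LCLA area, hook up the interrupt, register the
 * dmaengine devices and finally write the hardware defaults.
 */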
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}

	ret = d40_lcla_allocate(base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err) {
		/* Don't return the stale 0 left in ret by request_irq() */
		ret = err;
		goto failure;
	}

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);