ste_dma40.c

/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)      (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY 100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32

/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE     (1 << 31)
#define D40_ALLOC_PHY      (1 << 30)
#define D40_ALLOC_LOG_FREE 0

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0 51
#define DB8500_DMA_MEMCPY_EV_1 56
#define DB8500_DMA_MEMCPY_EV_2 57
#define DB8500_DMA_MEMCPY_EV_3 58
#define DB8500_DMA_MEMCPY_EV_4 59
#define DB8500_DMA_MEMCPY_EV_5 60

static int dma40_memcpy_channels[] = {
        DB8500_DMA_MEMCPY_EV_0,
        DB8500_DMA_MEMCPY_EV_1,
        DB8500_DMA_MEMCPY_EV_2,
        DB8500_DMA_MEMCPY_EV_3,
        DB8500_DMA_MEMCPY_EV_4,
        DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
        .mode = STEDMA40_MODE_PHYSICAL,
        .dir = STEDMA40_MEM_TO_MEM,

        .src_info.data_width = STEDMA40_BYTE_WIDTH,
        .src_info.psize = STEDMA40_PSIZE_PHY_1,
        .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

        .dst_info.data_width = STEDMA40_BYTE_WIDTH,
        .dst_info.psize = STEDMA40_PSIZE_PHY_1,
        .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_log = {
        .mode = STEDMA40_MODE_LOGICAL,
        .dir = STEDMA40_MEM_TO_MEM,

        .src_info.data_width = STEDMA40_BYTE_WIDTH,
        .src_info.psize = STEDMA40_PSIZE_LOG_1,
        .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

        .dst_info.data_width = STEDMA40_BYTE_WIDTH,
        .dst_info.psize = STEDMA40_PSIZE_LOG_1,
        .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
        D40_DMA_STOP        = 0,
        D40_DMA_RUN         = 1,
        D40_DMA_SUSPEND_REQ = 2,
        D40_DMA_SUSPENDED   = 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate the event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for an event line.
 */
enum d40_events {
        D40_DEACTIVATE_EVENTLINE  = 0,
        D40_ACTIVATE_EVENTLINE    = 1,
        D40_SUSPEND_REQ_EVENTLINE = 2,
        D40_ROUND_EVENTLINE       = 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
        D40_DREG_LCPA,
        D40_DREG_LCLA,
        D40_DREG_PRMSE,
        D40_DREG_PRMSO,
        D40_DREG_PRMOE,
        D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * Since 9540 and 8540 have the same HW revision,
 * use v4a for 9540 or earlier and
 * use v4b for 8540 or later.
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 */
static u32 d40_backup_regs_v4a[] = {
        D40_DREG_PSEG1,
        D40_DREG_PSEG2,
        D40_DREG_PSEG3,
        D40_DREG_PSEG4,
        D40_DREG_PCEG1,
        D40_DREG_PCEG2,
        D40_DREG_PCEG3,
        D40_DREG_PCEG4,
        D40_DREG_RSEG1,
        D40_DREG_RSEG2,
        D40_DREG_RSEG3,
        D40_DREG_RSEG4,
        D40_DREG_RCEG1,
        D40_DREG_RCEG2,
        D40_DREG_RCEG3,
        D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
        D40_DREG_CPSEG1,
        D40_DREG_CPSEG2,
        D40_DREG_CPSEG3,
        D40_DREG_CPSEG4,
        D40_DREG_CPSEG5,
        D40_DREG_CPCEG1,
        D40_DREG_CPCEG2,
        D40_DREG_CPCEG3,
        D40_DREG_CPCEG4,
        D40_DREG_CPCEG5,
        D40_DREG_CRSEG1,
        D40_DREG_CRSEG2,
        D40_DREG_CRSEG3,
        D40_DREG_CRSEG4,
        D40_DREG_CRSEG5,
        D40_DREG_CRCEG1,
        D40_DREG_CRCEG2,
        D40_DREG_CRCEG3,
        D40_DREG_CRCEG4,
        D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static u32 d40_backup_regs_chan[] = {
        D40_CHAN_REG_SSCFG,
        D40_CHAN_REG_SSELT,
        D40_CHAN_REG_SSPTR,
        D40_CHAN_REG_SSLNK,
        D40_CHAN_REG_SDCFG,
        D40_CHAN_REG_SDELT,
        D40_CHAN_REG_SDPTR,
        D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
                             BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If it equals
 * D40_PHY_CHAN, lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
        u32 src;
        u32 clr;
        bool is_error;
        int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
        {D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
        {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
        {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
        {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
        {D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
        {D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
        {D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
        {D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
        {D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
        {D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
        {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,   0},
        {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false,  32},
        {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false,  64},
        {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false,  96},
        {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
        {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,    0},
        {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,   32},
        {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,   64},
        {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,   96},
        {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
        {D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
        {D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
        unsigned int reg;
        unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
        /* Clock every part of the DMA block from start */
        { .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

        /* Interrupts on all logical channels */
        { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};

static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
        /* Clock every part of the DMA block from start */
        { .reg = D40_DREG_GCC,     .val = D40_DREG_GCC_ENABLE_ALL},

        /* Interrupts on all logical channels */
        { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
        void *base;
        int size;
        dma_addr_t dma_addr;
        /* Space for dst and src, plus an extra for padding */
        u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
        /* LLI physical */
        struct d40_phy_lli_bidir lli_phy;
        /* LLI logical */
        struct d40_log_lli_bidir lli_log;

        struct d40_lli_pool lli_pool;
        int lli_len;
        int lli_current;
        int lcla_alloc;

        struct dma_async_tx_descriptor txd;
        struct list_head node;

        bool is_in_client_list;
        bool cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map of which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
        void *base;
        dma_addr_t dma_addr;
        void *base_unaligned;
        int pages;
        spinlock_t lock;
        struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
 */
struct d40_phy_res {
        spinlock_t lock;
        bool reserved;
        int num;
        u32 allocated_src;
        u32 allocated_dst;
        bool use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical channel number, if any, of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending().
 * @active: Active descriptor.
 * @done: Completed jobs.
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
        spinlock_t lock;
        int log_num;
        int pending_tx;
        bool busy;
        struct d40_phy_res *phy_chan;
        struct dma_chan chan;
        struct tasklet_struct tasklet;
        struct list_head client;
        struct list_head pending_queue;
        struct list_head active;
        struct list_head done;
        struct list_head queue;
        struct list_head prepare_queue;
        struct stedma40_chan_cfg dma_cfg;
        bool configured;
        struct d40_base *base;
        /* Default register configurations */
        u32 src_def_cfg;
        u32 dst_def_cfg;
        struct d40_def_lcsp log_def;
        struct d40_log_lli_full *lcpa;
        /* Runtime reconfiguration */
        dma_addr_t runtime_addr;
        enum dma_transfer_direction runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
        u32 *backup;
        u32 backup_size;
        u32 realtime_en;
        u32 realtime_clear;
        u32 high_prio_en;
        u32 high_prio_clear;
        u32 interrupt_en;
        u32 interrupt_clear;
        struct d40_interrupt_lookup *il;
        u32 il_size;
        struct d40_reg_val *init_reg;
        u32 init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_parms: DMA parameters.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @initialized: true if the dma has been initialized.
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller.
 */
struct d40_base {
        spinlock_t interrupt_lock;
        spinlock_t execmd_lock;
        struct device *dev;
        void __iomem *virtbase;
        u8 rev:4;
        struct clk *clk;
        phys_addr_t phy_start;
        resource_size_t phy_size;
        int irq;
        int num_phy_chans;
        int num_log_chans;
        struct device_dma_parameters dma_parms;
        struct dma_device dma_both;
        struct dma_device dma_slave;
        struct dma_device dma_memcpy;
        struct d40_chan *phy_chans;
        struct d40_chan *log_chans;
        struct d40_chan **lookup_log_chans;
        struct d40_chan **lookup_phy_chans;
        struct stedma40_platform_data *plat_data;
        struct regulator *lcpa_regulator;
        /* Physical half channels */
        struct d40_phy_res *phy_res;
        struct d40_lcla_pool lcla_pool;
        void *lcpa_base;
        dma_addr_t phy_lcpa;
        resource_size_t lcpa_size;
        struct kmem_cache *desc_slab;
        u32 reg_val_backup[BACKUP_REGS_SZ];
        u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
        u32 *reg_val_backup_chan;
        u16 gcc_pwr_off_mask;
        bool initialized;
        struct d40_gen_dmac gen_dmac;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
        return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
        return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
        return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
        return chan->base->virtbase + D40_DREG_PCBASE +
               chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...) \
        dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...) \
        d40_err(chan2dev(d40c), format, ## arg)

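/*
 * Allocate space for the link-list items (LLIs) of a descriptor. The
 * common one-link case fits in the descriptor's own pre_alloc_lli
 * buffer; longer chains fall back to kmalloc. Physical-channel LLIs
 * are fetched by the hardware, so that memory is also DMA-mapped here.
 */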
static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
                              int lli_len)
{
        bool is_log = chan_is_logical(d40c);
        u32 align;
        void *base;

        if (is_log)
                align = sizeof(struct d40_log_lli);
        else
                align = sizeof(struct d40_phy_lli);

        if (lli_len == 1) {
                base = d40d->lli_pool.pre_alloc_lli;
                d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
                d40d->lli_pool.base = NULL;
        } else {
                d40d->lli_pool.size = lli_len * 2 * align;

                base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
                d40d->lli_pool.base = base;

                if (d40d->lli_pool.base == NULL)
                        return -ENOMEM;
        }

        if (is_log) {
                d40d->lli_log.src = PTR_ALIGN(base, align);
                d40d->lli_log.dst = d40d->lli_log.src + lli_len;

                d40d->lli_pool.dma_addr = 0;
        } else {
                d40d->lli_phy.src = PTR_ALIGN(base, align);
                d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

                d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
                                                         d40d->lli_phy.src,
                                                         d40d->lli_pool.size,
                                                         DMA_TO_DEVICE);

                if (dma_mapping_error(d40c->base->dev,
                                      d40d->lli_pool.dma_addr)) {
                        kfree(d40d->lli_pool.base);
                        d40d->lli_pool.base = NULL;
                        d40d->lli_pool.dma_addr = 0;
                        return -ENOMEM;
                }
        }

        return 0;
}

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
        if (d40d->lli_pool.dma_addr)
                dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
                                 d40d->lli_pool.size, DMA_TO_DEVICE);

        kfree(d40d->lli_pool.base);
        d40d->lli_pool.base = NULL;
        d40d->lli_pool.size = 0;
        d40d->lli_log.src = NULL;
        d40d->lli_log.dst = NULL;
        d40d->lli_phy.src = NULL;
        d40d->lli_phy.dst = NULL;
}

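/*
 * Reserve one src/dst pair of LCLA entries for this descriptor in the
 * physical channel's region of the pool. Returns the allocated index,
 * or -EINVAL if every slot is taken.
 */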
static int d40_lcla_alloc_one(struct d40_chan *d40c,
                              struct d40_desc *d40d)
{
        unsigned long flags;
        int i;
        int ret = -EINVAL;

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        /*
         * Allocate both src and dst at the same time; therefore the half
         * starts at 1, since 0 can't be used as it is the end marker.
         */
        for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
                int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

                if (!d40c->base->lcla_pool.alloc_map[idx]) {
                        d40c->base->lcla_pool.alloc_map[idx] = d40d;
                        d40d->lcla_alloc++;
                        ret = i;
                        break;
                }
        }

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
                             struct d40_desc *d40d)
{
        unsigned long flags;
        int i;
        int ret = -EINVAL;

        if (chan_is_physical(d40c))
                return 0;

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
                int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

                if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
                        d40c->base->lcla_pool.alloc_map[idx] = NULL;
                        d40d->lcla_alloc--;
                        if (d40d->lcla_alloc == 0) {
                                ret = 0;
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
        list_del(&d40d->node);
}

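/*
 * Get a descriptor to work on: first try to reclaim an already-acked
 * descriptor from the client list, otherwise allocate a fresh one from
 * the slab cache. Called from contexts that cannot sleep, hence
 * GFP_NOWAIT.
 */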
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
        struct d40_desc *desc = NULL;

        if (!list_empty(&d40c->client)) {
                struct d40_desc *d;
                struct d40_desc *_d;

                list_for_each_entry_safe(d, _d, &d40c->client, node) {
                        if (async_tx_test_ack(&d->txd)) {
                                d40_desc_remove(d);
                                desc = d;
                                memset(desc, 0, sizeof(*desc));
                                break;
                        }
                }
        }

        if (!desc)
                desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

        if (desc)
                INIT_LIST_HEAD(&desc->node);

        return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
        d40_pool_lli_free(d40c, d40d);
        d40_lcla_free_all(d40c, d40d);
        kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
        list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
        struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
        struct d40_phy_lli *lli_src = desc->lli_phy.src;
        void __iomem *base = chan_base(chan);

        writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
        writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
        writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
        writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

        writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
        writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
        writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
        writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
        list_add_tail(&desc->node, &d40c->done);
}

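/*
 * Load the logical LLIs of a descriptor into LCPA and LCLA space. The
 * first link (or, for cyclic "linkback" jobs, a copy of it) lives in
 * LCPA; the remaining links are chained through LCLA entries allocated
 * one step ahead of the hardware.
 */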
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
        struct d40_lcla_pool *pool = &chan->base->lcla_pool;
        struct d40_log_lli_bidir *lli = &desc->lli_log;
        int lli_current = desc->lli_current;
        int lli_len = desc->lli_len;
        bool cyclic = desc->cyclic;
        int curr_lcla = -EINVAL;
        int first_lcla = 0;
        bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
        bool linkback;

        /*
         * We may have partially running cyclic transfers, in case we didn't
         * get enough LCLA entries.
         */
        linkback = cyclic && lli_current == 0;

        /*
         * For linkback, we need one LCLA even with only one link, because we
         * can't link back to the one in LCPA space.
         */
        if (linkback || (lli_len - lli_current > 1)) {
                /*
                 * If the channel is expected to use only soft_lli don't
                 * allocate an LCLA. This is to avoid a HW issue that exists
                 * in some controllers during a peripheral-to-memory transfer
                 * that uses linked lists.
                 */
                if (!(chan->phy_chan->use_soft_lli &&
                      chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
                        curr_lcla = d40_lcla_alloc_one(chan, desc);

                first_lcla = curr_lcla;
        }

        /*
         * For linkback, we normally load the LCPA in the loop since we need to
         * link it to the second LCLA and not the first. However, if we
         * couldn't even get a first LCLA, then we have to run in LCPA and
         * reload manually.
         */
        if (!linkback || curr_lcla == -EINVAL) {
                unsigned int flags = 0;

                if (curr_lcla == -EINVAL)
                        flags |= LLI_TERM_INT;

                d40_log_lli_lcpa_write(chan->lcpa,
                                       &lli->dst[lli_current],
                                       &lli->src[lli_current],
                                       curr_lcla,
                                       flags);
                lli_current++;
        }

        if (curr_lcla < 0)
                goto out;

        for (; lli_current < lli_len; lli_current++) {
                unsigned int lcla_offset = chan->phy_chan->num * 1024 +
                                           8 * curr_lcla * 2;
                struct d40_log_lli *lcla = pool->base + lcla_offset;
                unsigned int flags = 0;
                int next_lcla;

                if (lli_current + 1 < lli_len)
                        next_lcla = d40_lcla_alloc_one(chan, desc);
                else
                        next_lcla = linkback ? first_lcla : -EINVAL;

                if (cyclic || next_lcla == -EINVAL)
                        flags |= LLI_TERM_INT;

                if (linkback && curr_lcla == first_lcla) {
                        /* First link goes in both LCPA and LCLA */
                        d40_log_lli_lcpa_write(chan->lcpa,
                                               &lli->dst[lli_current],
                                               &lli->src[lli_current],
                                               next_lcla, flags);
                }

                /*
                 * One unused LCLA in the cyclic case if the very first
                 * next_lcla fails...
                 */
                d40_log_lli_lcla_write(lcla,
                                       &lli->dst[lli_current],
                                       &lli->src[lli_current],
                                       next_lcla, flags);

                /*
                 * Cache maintenance is not needed if lcla is
                 * mapped in esram.
                 */
                if (!use_esram_lcla) {
                        dma_sync_single_range_for_device(chan->base->dev,
                                                pool->dma_addr, lcla_offset,
                                                2 * sizeof(struct d40_log_lli),
                                                DMA_TO_DEVICE);
                }
                curr_lcla = next_lcla;

                if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
                        lli_current++;
                        break;
                }
        }

out:
        desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
        if (chan_is_physical(d40c)) {
                d40_phy_lli_load(d40c, d40d);
                d40d->lli_current = d40d->lli_len;
        } else
                d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->active))
                return NULL;

        d = list_first_entry(&d40c->active,
                             struct d40_desc,
                             node);
        return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
        d40_desc_remove(desc);
        desc->is_in_client_list = false;
        list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->pending_queue))
                return NULL;

        d = list_first_entry(&d40c->pending_queue,
                             struct d40_desc,
                             node);
        return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->queue))
                return NULL;

        d = list_first_entry(&d40c->queue,
                             struct d40_desc,
                             node);
        return d;
}

static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
        if (list_empty(&d40c->done))
                return NULL;

        return list_first_entry(&d40c->done, struct d40_desc, node);
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
        if (is_log) {
                if (psize == STEDMA40_PSIZE_LOG_1)
                        return 1;
        } else {
                if (psize == STEDMA40_PSIZE_PHY_1)
                        return 1;
        }

        return 2 << psize;
}

/*
 * The dma only supports transmitting packages up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
        int dmalen;
        u32 max_w = max(data_width1, data_width2);
        u32 min_w = min(data_width1, data_width2);
        u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

        if (seg_max > STEDMA40_MAX_SEG_SIZE)
                seg_max -= (1 << max_w);

        if (!IS_ALIGNED(size, 1 << max_w))
                return -EINVAL;

        if (size <= seg_max)
                dmalen = 1;
        else {
                dmalen = size / seg_max;
                if (dmalen * seg_max < size)
                        dmalen++;
        }
        return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
                           u32 data_width1, u32 data_width2)
{
        struct scatterlist *sg;
        int i;
        int len = 0;
        int ret;

        for_each_sg(sgl, sg, sg_len, i) {
                ret = d40_size_2_dmalen(sg_dma_len(sg),
                                        data_width1, data_width2);
                if (ret < 0)
                        return ret;
                len += ret;
        }
        return len;
}

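/*
 * Register save/restore for power management: dma40_backup() copies a
 * list of registers between the hardware and a backup buffer. With
 * save == true the current register values are read into @backup,
 * otherwise the backed-up values are written back.
 */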
#ifdef CONFIG_PM
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
                         u32 *regaddr, int num, bool save)
{
        int i;

        for (i = 0; i < num; i++) {
                void __iomem *addr = baseaddr + regaddr[i];

                if (save)
                        backup[i] = readl_relaxed(addr);
                else
                        writel_relaxed(backup[i], addr);
        }
}

static void d40_save_restore_registers(struct d40_base *base, bool save)
{
        int i;

        /* Save/Restore channel specific registers */
        for (i = 0; i < base->num_phy_chans; i++) {
                void __iomem *addr;
                int idx;

                if (base->phy_res[i].reserved)
                        continue;

                addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
                idx = i * ARRAY_SIZE(d40_backup_regs_chan);

                dma40_backup(addr, &base->reg_val_backup_chan[idx],
                             d40_backup_regs_chan,
                             ARRAY_SIZE(d40_backup_regs_chan),
                             save);
        }

        /* Save/Restore global registers */
        dma40_backup(base->virtbase, base->reg_val_backup,
                     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
                     save);

        /* Save/Restore registers only existing on dma40 v3 and later */
        if (base->gen_dmac.backup)
                dma40_backup(base->virtbase, base->reg_val_backup_v4,
                             base->gen_dmac.backup,
                             base->gen_dmac.backup_size,
                             save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
}
#endif

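/*
 * Issue a command to a physical channel through the shared ACTIVE/ACTIVO
 * registers. A STOP is preceded by a SUSPEND_REQ, and a SUSPEND_REQ is
 * polled (up to D40_SUSPEND_MAX_IT iterations) until the channel reports
 * STOP or SUSPENDED.
 */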
static int __d40_execute_command_phy(struct d40_chan *d40c,
                                     enum d40_command command)
{
        u32 status;
        int i;
        void __iomem *active_reg;
        int ret = 0;
        unsigned long flags;
        u32 wmask;

        if (command == D40_DMA_STOP) {
                ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
                if (ret)
                        return ret;
        }

        spin_lock_irqsave(&d40c->base->execmd_lock, flags);

        if (d40c->phy_chan->num % 2 == 0)
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
        else
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

        if (command == D40_DMA_SUSPEND_REQ) {
                status = (readl(active_reg) &
                          D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                        D40_CHAN_POS(d40c->phy_chan->num);

                if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
                        goto done;
        }

        wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
        writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
               active_reg);

        if (command == D40_DMA_SUSPEND_REQ) {

                for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
                        status = (readl(active_reg) &
                                  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                                D40_CHAN_POS(d40c->phy_chan->num);

                        cpu_relax();
                        /*
                         * Reduce the number of bus accesses while
                         * waiting for the DMA to suspend.
                         */
                        udelay(3);

                        if (status == D40_DMA_STOP ||
                            status == D40_DMA_SUSPENDED)
                                break;
                }

                if (i == D40_SUSPEND_MAX_IT) {
                        chan_err(d40c,
                                "unable to suspend the chl %d (log: %d) status %x\n",
                                d40c->phy_chan->num, d40c->log_num,
                                status);
                        dump_stack();
                        ret = -EBUSY;
                }

        }
done:
        spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
        return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
        struct d40_desc *d40d;
        struct d40_desc *_d;

        /* Release completed descriptors */
        while ((d40d = d40_first_done(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release queued descriptors waiting for transfer */
        while ((d40d = d40_first_queued(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release pending descriptors */
        while ((d40d = d40_first_pending(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release client owned descriptors */
        if (!list_empty(&d40c->client))
                list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                }

        /* Release descriptors in prepare queue */
        if (!list_empty(&d40c->prepare_queue))
                list_for_each_entry_safe(d40d, _d,
                                         &d40c->prepare_queue, node) {
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                }

        d40c->pending_tx = 0;
}

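/*
 * Manipulate one event line of a logical channel through its SSLNK/SDLNK
 * register: deactivate, activate (with a retry workaround for a hardware
 * quirk), or request suspend and poll until the line is deactivated.
 */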
static void __d40_config_set_event(struct d40_chan *d40c,
                                   enum d40_events event_type, u32 event,
                                   int reg)
{
        void __iomem *addr = chan_base(d40c) + reg;
        int tries;
        u32 status;

        switch (event_type) {

        case D40_DEACTIVATE_EVENTLINE:

                writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
                       | ~D40_EVENTLINE_MASK(event), addr);
                break;

        case D40_SUSPEND_REQ_EVENTLINE:
                status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
                          D40_EVENTLINE_POS(event);

                if (status == D40_DEACTIVATE_EVENTLINE ||
                    status == D40_SUSPEND_REQ_EVENTLINE)
                        break;

                writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
                       | ~D40_EVENTLINE_MASK(event), addr);

                for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

                        status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
                                  D40_EVENTLINE_POS(event);

                        cpu_relax();
                        /*
                         * Reduce the number of bus accesses while
                         * waiting for the DMA to suspend.
                         */
                        udelay(3);

                        if (status == D40_DEACTIVATE_EVENTLINE)
                                break;
                }

                if (tries == D40_SUSPEND_MAX_IT) {
                        chan_err(d40c,
                                "unable to stop the event_line chl %d (log: %d)"
                                " status %x\n", d40c->phy_chan->num,
                                d40c->log_num, status);
                }
                break;

        case D40_ACTIVATE_EVENTLINE:
        /*
         * The hardware sometimes doesn't register the enable when src and dst
         * event lines are active on the same logical channel. Retry to ensure
         * it does. Usually only one retry is sufficient.
         */
                tries = 100;
                while (--tries) {
                        writel((D40_ACTIVATE_EVENTLINE <<
                                D40_EVENTLINE_POS(event)) |
                               ~D40_EVENTLINE_MASK(event), addr);

                        if (readl(addr) & D40_EVENTLINE_MASK(event))
                                break;
                }

                if (tries != 99)
                        dev_dbg(chan2dev(d40c),
                                "[%s] workaround enable S%cLNK (%d tries)\n",
                                __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
                                100 - tries);

                WARN_ON(!tries);
                break;

        case D40_ROUND_EVENTLINE:
                BUG();
                break;
        }
}

static void d40_config_set_event(struct d40_chan *d40c,
                                 enum d40_events event_type)
{
        u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

        /* Enable event line connected to device (or memcpy) */
        if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
            (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
                __d40_config_set_event(d40c, event_type, event,
                                       D40_CHAN_REG_SSLNK);

        if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
                __d40_config_set_event(d40c, event_type, event,
                                       D40_CHAN_REG_SDLNK);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
        void __iomem *chanbase = chan_base(d40c);
        u32 val;

        val = readl(chanbase + D40_CHAN_REG_SSLNK);
        val |= readl(chanbase + D40_CHAN_REG_SDLNK);

        return val;
}

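/*
 * Execute a command on a logical channel. Logical channels are started
 * and stopped via their event lines; the underlying physical channel is
 * only commanded when needed, e.g. a STOP is forwarded once no event
 * lines remain active.
 */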
static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				 D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}

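/* Bytes left to transfer on the currently loaded job (elements x width) */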
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}

static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	pm_runtime_get_sync(d40c->base->dev);
	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

	/* Resume the job if there are bytes left to transfer or a linked tx */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

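/* Assign a cookie to the descriptor and add it to the pending queue */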
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			pm_runtime_get_sync(d40c->base->dev);
		}

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload it,
		 * but only once the list is completed. We need to check for
		 * done because the interrupt will hit for every link, and not
		 * just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL)
			d40c->busy = false;
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);

		d40_desc_remove(d40d);
		d40_desc_done(d40c, d40d);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first entry from the done list */
	d40d = d40_first_done(d40c);
	if (d40d == NULL) {
		/* Check if we have reached here for cyclic job */
		d40d = d40_first_active_get(d40c);
		if (d40d == NULL || !d40d->cyclic)
			goto err;
	}

	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

	/*
	 * When a channel is being terminated, pending_tx is set to zero.
	 * This prevents any finished active jobs from being returned
	 * to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

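/*
 * Walk the interrupt status registers, ACK every set bit and dispatch it to
 * the owning channel: transfer-complete interrupts are handled, error
 * interrupts are only logged.
 */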
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	int i;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;
	u32 regs[base->gen_dmac.il_size];
	struct d40_interrupt_lookup *il = base->gen_dmac.il;
	u32 il_size = base->gen_dmac.il_size;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < il_size; i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {
		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * il_size, chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * il_size)
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];

		if (!d40c) {
			/*
			 * No error because this can happen if something else
			 * in the system is using the channel.
			 */
			continue;
		}

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
	    (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
	    (conf->dev_type < 0)) {
		chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    d40c->base->plat_data->dev_tx[conf->dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid TX channel address (%d)\n",
			 conf->dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    d40c->base->plat_data->dev_rx[conf->dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid RX channel address (%d)\n",
			 conf->dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}

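/*
 * Try to reserve a physical channel, or a single event line on it when the
 * requester is a logical channel. allocated_src/allocated_dst hold either
 * D40_ALLOC_PHY for an exclusively owned physical channel or a bitmask of
 * the event lines taken by the logical channels sharing it. *first_user
 * tells the caller whether this was the first allocation on the physical
 * channel.
 */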
static bool d40_alloc_mask_set(struct d40_phy_res *phy,
			       bool is_src, int log_event_line, bool is_log,
			       bool *first_user)
{
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	*first_user = ((phy->allocated_src | phy->allocated_dst)
			== D40_ALLOC_FREE);

	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
	int dev_type = d40c->dma_cfg.dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	int num_phy_chans;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;
	num_phy_chans = d40c->base->num_phy_chans;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			if (d40c->dma_cfg.use_fixed_channel) {
				i = d40c->dma_cfg.phy_channel;
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
					goto found_phy;
			} else {
				for (i = 0; i < num_phy_chans; i++) {
					if (d40_alloc_mask_set(&phys[i], is_src,
							       0, is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;

		if (d40c->dma_cfg.use_fixed_channel) {
			i = d40c->dma_cfg.phy_channel;

			if ((i != phy_num) && (i != phy_num + 1)) {
				dev_err(chan2dev(d40c),
					"invalid fixed phy channel %d\n", i);
				return -EINVAL;
			}

			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
					       is_log, first_phy_user))
				goto found_log;

			dev_err(chan2dev(d40c),
				"could not allocate fixed phy channel %d\n", i);
			return -EINVAL;
		}

		/*
		 * Spread logical channels across all available physical
		 * channels rather than packing every logical channel onto
		 * the first few available ones.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}

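/* Set up the default logical or physical memcpy configuration for a channel */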
static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = dma40_memcpy_conf_log;
		d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];

		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = dma40_memcpy_conf_phy;

		/* Generate interrupt at end of transfer or relink. */
		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);

		/* Generate interrupt on error. */
		d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		is_src = false;
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		is_src = true;
	else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	pm_runtime_get_sync(d40c->base->dev);
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "stop failed\n");
		goto out;
	}

	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);

	if (chan_is_logical(d40c))
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
	else
		d40c->base->lookup_phy_chans[phy->num] = NULL;

	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->busy = false;
	d40c->phy_chan = NULL;
	d40c->configured = false;
out:
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	return res;
}

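/*
 * A physical channel counts as paused when its ACTIVE/ACTIVO status is STOP
 * or SUSPENDED; a logical channel counts as paused when its event line is
 * not in the RUN state.
 */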
static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			 D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		 D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);
	if (ret < 0)
		return ret;

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}

static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);
	if (ret < 0)
		return ret;

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}

static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}

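/*
 * Device address: the runtime-configured address if one is set, otherwise
 * the platform data default for this device type.
 */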
static dma_addr_t
d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
{
	struct stedma40_platform_data *plat = chan->base->plat_data;
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	dma_addr_t addr = 0;

	if (chan->runtime_addr)
		return chan->runtime_addr;

	if (direction == DMA_DEV_TO_MEM)
		addr = plat->dev_rx[cfg->dev_type];
	else if (direction == DMA_MEM_TO_DEV)
		addr = plat->dev_tx[cfg->dev_type];

	return addr;
}

static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_transfer_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr = 0;
	dma_addr_t dst_dev_addr = 0;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto err;

	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
		desc->cyclic = true;

	if (direction != DMA_TRANS_NONE) {
		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);

		if (direction == DMA_DEV_TO_MEM)
			src_dev_addr = dev_addr;
		else if (direction == DMA_MEM_TO_DEV)
			dst_dev_addr = dev_addr;
	}

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto err;
	}

	/*
	 * Add the descriptor to the prepare queue so that it can be
	 * freed later in terminate_all.
	 */
	list_add_tail(&desc->node, &chan->prepare_queue);

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;

err:
	if (desc)
		d40_desc_free(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}

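/**
 * stedma40_filter() - filter function for dma_request_channel()
 * @chan: candidate channel
 * @data: pointer to a struct stedma40_chan_cfg, or NULL to configure the
 *	  channel for plain memcpy
 *
 * A client would typically request a channel along the lines of the
 * illustrative snippet below (everything except stedma40_filter itself is
 * the caller's own code):
 *
 *	dma_cap_mask_t mask;
 *	struct stedma40_chan_cfg cfg = { .dir = STEDMA40_PERIPH_TO_MEM, };
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */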
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);

static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 rtreg;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = 1 << event;
	u32 prioreg;
	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;

	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;

	/*
	 * Due to a hardware bug, in some cases a logical channel triggered by
	 * a high priority destination event line can generate extra packet
	 * transactions.
	 *
	 * The workaround is to not set the high priority level for the
	 * destination event lines that trigger logical channels.
	 */
	if (!src && chan_is_logical(d40c))
		highprio = false;

	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}

static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);

	if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
}

#define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
#define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)

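/*
 * Translate a device-tree DMA specifier into a channel. The three cells are
 * the device type, the physical channel number (only honoured when the
 * fixed-channel flag is set) and a flags word decoded with the
 * D40_DT_FLAGS_* macros above.
 */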
static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
				  struct of_dma *ofdma)
{
	struct stedma40_chan_cfg cfg;
	dma_cap_mask_t cap;
	u32 flags;

	memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	cfg.dev_type = dma_spec->args[0];
	flags = dma_spec->args[2];

	switch (D40_DT_FLAGS_MODE(flags)) {
	case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
	case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
	}

	switch (D40_DT_FLAGS_DIR(flags)) {
	case 0:
		cfg.dir = STEDMA40_MEM_TO_PERIPH;
		cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
		break;
	case 1:
		cfg.dir = STEDMA40_PERIPH_TO_MEM;
		cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
		break;
	}

	if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
		cfg.phy_channel = dma_spec->args[1];
		cfg.use_fixed_channel = true;
	}

	return dma_request_channel(cap, stedma40_filter, &cfg);
}

/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);

	dma_cookie_init(chan);

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			chan_err(d40c, "Failed to configure memcpy channel\n");
			goto fail;
		}
	}

	err = d40_allocate_channel(d40c, &is_free_phy);
	if (err) {
		chan_err(d40c, "Failed to allocate channel\n");
		d40c->configured = false;
		goto fail;
	}

	pm_runtime_get_sync(d40c->base->dev);

	d40_set_prio_realtime(d40c);

	if (chan_is_logical(d40c)) {
		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;

		/* Unmask the Global Interrupt Mask. */
		d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
	}

	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
		chan_is_logical(d40c) ? "logical" : "physical",
		d40c->phy_chan->num,
		d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}

static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = size;
	sg_dma_len(&src_sg) = size;

	return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
			   DMA_MEM_TO_MEM, dma_flags);
}

static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
		   struct scatterlist *dst_sg, unsigned int dst_nents,
		   struct scatterlist *src_sg, unsigned int src_nents,
		   unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return d40_prep_sg(chan, src_sg, dst_sg, src_nents,
			   DMA_MEM_TO_MEM, dma_flags);
}

static struct dma_async_tx_descriptor *
d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction direction,
		  unsigned long dma_flags, void *context)
{
	if (!is_slave_direction(direction))
		return NULL;

	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}

static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		      size_t buf_len, size_t period_len,
		      enum dma_transfer_direction direction,
		      unsigned long flags, void *context)
{
	unsigned int periods = buf_len / period_len;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
	if (!sg)
		return NULL;

	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = dma_addr;
		sg_dma_len(&sg[i]) = period_len;
		dma_addr += period_len;
	}

	/* Chain the extra last entry back to the first to make the list cyclic */
	sg[periods].offset = 0;
	sg_dma_len(&sg[periods]) = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);

	kfree(sg);

	return txd;
}

static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	enum dma_status ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		return -EINVAL;
	}

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, stedma40_residue(chan));

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;

	return ret;
}

static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);

	/* Busy means that queued jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

static void d40_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int ret;

	spin_lock_irqsave(&d40c->lock, flags);

	pm_runtime_get_sync(d40c->base->dev);
	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (ret)
		chan_err(d40c, "Failed to stop channel\n");

	d40_term_all(d40c);
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}
	d40c->busy = false;

	spin_unlock_irqrestore(&d40c->lock, flags);
}

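/*
 * Translate a dmaengine bus width and maxburst into this driver's
 * per-half-channel data width and packet size (burst) encoding.
 */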
static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
			    struct stedma40_half_channel_info *info,
			    enum dma_slave_buswidth width,
			    u32 maxburst)
{
	enum stedma40_periph_data_width addr_width;
	int psize;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			width);
		return -EINVAL;
	}

	if (chan_is_logical(d40c)) {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}

	info->data_width = addr_width;
	info->psize = psize;
	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	return 0;
}

/* Runtime reconfiguration extension */
static int d40_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	dma_addr_t config_addr;
	u32 src_maxburst, dst_maxburst;
	int ret;

	src_addr_width = config->src_addr_width;
	src_maxburst = config->src_maxburst;
	dst_addr_width = config->dst_addr_width;
	dst_maxburst = config->dst_maxburst;

	if (config->direction == DMA_DEV_TO_MEM) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		/* Configure the memory side */
		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			dst_addr_width = src_addr_width;
		if (dst_maxburst == 0)
			dst_maxburst = src_maxburst;

	} else if (config->direction == DMA_MEM_TO_DEV) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		/* Configure the memory side */
		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			src_addr_width = dst_addr_width;
		if (src_maxburst == 0)
			src_maxburst = dst_maxburst;
	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return -EINVAL;
	}

	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
		dev_err(d40c->base->dev,
			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
			src_maxburst,
			src_addr_width,
			dst_maxburst,
			dst_addr_width);
		return -EINVAL;
	}

	if (src_maxburst > 16) {
		src_maxburst = 16;
		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
	} else if (dst_maxburst > 16) {
		dst_maxburst = 16;
		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
	}

	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
					  src_addr_width,
					  src_maxburst);
	if (ret)
		return ret;

	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
					  dst_addr_width,
					  dst_maxburst);
	if (ret)
		return ret;

	/* Fill in register values */
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d/%d, "
		"maxburst %d/%d elements, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
		src_addr_width, dst_addr_width,
		src_maxburst, dst_maxburst);

	return 0;
}

static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		d40_terminate_all(chan);
		return 0;
	case DMA_PAUSE:
		return d40_pause(d40c);
	case DMA_RESUME:
		return d40_resume(d40c);
	case DMA_SLAVE_CONFIG:
		return d40_set_runtime_config(chan,
			(struct dma_slave_config *) arg);
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}

/* Initialization functions */
static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->done);
		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->pending_queue);
		INIT_LIST_HEAD(&d40c->client);
		INIT_LIST_HEAD(&d40c->prepare_queue);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}

static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
		dev->device_prep_slave_sg = d40_prep_slave_sg;

	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
		dev->device_prep_dma_memcpy = d40_prep_memcpy;

		/*
		 * This controller can only access addresses at even
		 * 32bit boundaries, i.e. 2^2.
		 */
		dev->copy_align = 2;
	}

	if (dma_has_cap(DMA_SG, dev->cap_mask))
		dev->device_prep_dma_sg = d40_prep_memcpy_sg;

	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;

	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
	dev->device_free_chan_resources = d40_free_chan_resources;
	dev->device_issue_pending = d40_issue_pending;
	dev->device_tx_status = d40_tx_status;
	dev->device_control = d40_control;
	dev->dev = base->dev;
}

static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

	d40_ops_init(base, &base->dma_slave);

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		d40_err(base->dev, "Failed to register slave channels\n");
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, ARRAY_SIZE(dma40_memcpy_channels));

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	d40_ops_init(base, &base->dma_memcpy);

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);

	d40_ops_init(base, &base->dma_both);
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}

/* Suspend resume functionality */
#ifdef CONFIG_PM
static int dma40_pm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;

	if (base->lcpa_regulator)
		ret = regulator_disable(base->lcpa_regulator);
	return ret;
}

static int dma40_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	d40_save_restore_registers(base, true);

	/* Don't disable/enable clocks for v1 due to HW bugs */
	if (base->rev != 1)
		writel_relaxed(base->gcc_pwr_off_mask,
			       base->virtbase + D40_DREG_GCC);

	return 0;
}

static int dma40_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	if (base->initialized)
		d40_save_restore_registers(base, false);

	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
		       base->virtbase + D40_DREG_GCC);
	return 0;
}

static int dma40_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;

	if (base->lcpa_regulator)
		ret = regulator_enable(base->lcpa_regulator);

	return ret;
}

static const struct dev_pm_ops dma40_pm_ops = {
	.suspend		= dma40_pm_suspend,
	.runtime_suspend	= dma40_runtime_suspend,
	.runtime_resume		= dma40_runtime_resume,
	.resume			= dma40_resume,
};
#define DMA40_PM_OPS	(&dma40_pm_ops)
#else
#define DMA40_PM_OPS	NULL
#endif

/* Initialization functions. */
static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;
	int gcc = D40_DREG_GCC_ENA;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
			base->phy_res[i].reserved = true;
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_SRC);
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_DST);
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			base->phy_res[i].reserved = false;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		base->phy_res[chan].reserved = true;
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_SRC);
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_DST);
		num_phy_chans_avail--;
	}

	/* Mark soft_lli channels */
	for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
		int chan = base->plat_data->soft_lli_chans[i];

		base->phy_res[chan].use_soft_lli = true;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	/*
	 * To keep things simple, enable all clocks initially. The clocks will
	 * get managed later, after channel allocation. The clocks for the
	 * event lines on which reserved channels exist are not managed here.
	 */
	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
	base->gcc_pwr_off_mask = gcc;

	return num_phy_chans_avail;
}

  2654. static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
  2655. {
  2656. struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
  2657. struct clk *clk = NULL;
  2658. void __iomem *virtbase = NULL;
  2659. struct resource *res = NULL;
  2660. struct d40_base *base = NULL;
  2661. int num_log_chans = 0;
  2662. int num_phy_chans;
  2663. int clk_ret = -EINVAL;
  2664. int i;
  2665. u32 pid;
  2666. u32 cid;
  2667. u8 rev;
  2668. clk = clk_get(&pdev->dev, NULL);
  2669. if (IS_ERR(clk)) {
  2670. d40_err(&pdev->dev, "No matching clock found\n");
  2671. goto failure;
  2672. }
  2673. clk_ret = clk_prepare_enable(clk);
  2674. if (clk_ret) {
  2675. d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
  2676. goto failure;
  2677. }
  2678. /* Get IO for DMAC base address */
  2679. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
  2680. if (!res)
  2681. goto failure;
  2682. if (request_mem_region(res->start, resource_size(res),
  2683. D40_NAME " I/O base") == NULL)
  2684. goto failure;
  2685. virtbase = ioremap(res->start, resource_size(res));
  2686. if (!virtbase)
  2687. goto failure;
  2688. /* This is just a regular AMBA PrimeCell ID actually */
	for (pid = 0, i = 0; i < 4; i++)
		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
			& 255) << (i * 8);
	for (cid = 0, i = 0; i < 4; i++)
		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
			& 255) << (i * 8);

	if (cid != AMBA_CID) {
		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
		goto failure;
	}
	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			AMBA_MANF_BITS(pid),
			AMBA_VENDOR_ST);
		goto failure;
	}
	/*
	 * HW revision:
	 * DB8500ed has revision 0
	 * ? has revision 1
	 * DB8500v1 has revision 2
	 * DB8500v2 has revision 3
	 * AP9540v1 has revision 4
	 * DB8540v1 has revision 4
	 */
	rev = AMBA_REV_BITS(pid);
	if (rev < 2) {
		d40_err(&pdev->dev, "hardware revision: %d is not supported\n",
			rev);
		goto failure;
	}
	/* The number of physical channels on this HW */
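	/*
	 * When the platform data does not specify a count, derive it from
	 * ICFG: the three low bits encode the channel count in units of
	 * four, so the maximum value of 0x7 corresponds to 32 channels.
	 */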
	if (plat_data->num_of_phy_chans)
		num_phy_chans = plat_data->num_of_phy_chans;
	else
		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;

	dev_info(&pdev->dev,
		 "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
		 rev, res->start, num_phy_chans, num_log_chans);

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + ARRAY_SIZE(dma40_memcpy_channels)) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		d40_err(&pdev->dev, "Out of memory\n");
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];
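	/*
	 * A platform-data channel count of 14 selects the v4b register
	 * layout of the controller; all other counts are treated as v4a,
	 * where the register backup tables are only available from
	 * hardware revision 3 onwards.
	 */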
	if (base->plat_data->num_of_phy_chans == 14) {
		base->gen_dmac.backup = d40_backup_regs_v4b;
		base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
		base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
		base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
		base->gen_dmac.il = il_v4b;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
		base->gen_dmac.init_reg = dma_init_reg_v4b;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
	} else {
		if (base->rev >= 3) {
			base->gen_dmac.backup = d40_backup_regs_v4a;
			base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
		}
		base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
		base->gen_dmac.realtime_en = D40_DREG_RSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
		base->gen_dmac.il = il_v4a;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
		base->gen_dmac.init_reg = dma_init_reg_v4a;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
	}
	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	base->lookup_log_chans = kzalloc(num_log_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_log_chans)
		goto failure;

	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
					    sizeof(d40_backup_regs_chan),
					    GFP_KERNEL);
	if (!base->reg_val_backup_chan)
		goto failure;

	base->lcla_pool.alloc_map =
		kzalloc(num_phy_chans * sizeof(struct d40_desc *)
			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;
failure:
	if (!clk_ret)
		clk_disable_unprepare(clk);
	if (!IS_ERR(clk))
		clk_put(clk);
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->reg_val_backup_chan);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
static void __init d40_hw_init(struct d40_base *base)
{
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;
	struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
	u32 reg_size = base->gen_dmac.init_reg_size;

	for (i = 0; i < reg_size; i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
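	/*
	 * Each channel owns two bits in the mode (PRMSE/PRMSO) and
	 * activity (ACTIVE/ACTIVO) registers, split over an even and an
	 * odd half. The loop walks the channels from the highest number
	 * down, shifting previously accumulated bits up as it goes;
	 * channels reserved for physical use keep their activity bits set
	 * and get neither an interrupt enable nor a mode setting.
	 */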
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);

	/* These are __initdata and cannot be accessed after init */
	base->gen_dmac.init_reg = NULL;
	base->gen_dmac.init_reg_size = 0;
}
static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit
	 * aligned. To fulfil this hardware requirement without wasting
	 * 256 KiB, we allocate pages until we get an aligned one.
	 */
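	/*
	 * The "18 bit" requirement is the 2^18 byte (256 KiB) boundary
	 * given by LCLA_ALIGNMENT: a candidate allocation is accepted as
	 * soon as its physical address has none of the low alignment bits
	 * set, and every rejected candidate is freed again below.
	 */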
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}
	/* Calculate how many pages are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
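	/*
	 * Note that __get_free_pages() takes an allocation order as its
	 * second argument, so the value computed above is used as an
	 * order here, not as a literal page count.
	 */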
	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success finding the correct
		 * alignment, try allocating a big buffer instead.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}
	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
static int __init d40_of_probe(struct platform_device *pdev,
			       struct device_node *np)
{
	struct stedma40_platform_data *pdata;

	/*
	 * FIXME: Fill in this routine as more support is added.
	 * The first platform enabled (u8500) doesn't need any extra
	 * properties to run, so this is fairly sparse currently.
	 */

	pdata = devm_kzalloc(&pdev->dev,
			     sizeof(struct stedma40_platform_data),
			     GFP_KERNEL);

	if (!pdata)
		return -ENOMEM;

	pdev->dev.platform_data = pdata;

	return 0;
}
static int __init d40_probe(struct platform_device *pdev)
{
	struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
	struct device_node *np = pdev->dev.of_node;
	int err;
	int ret = -ENOENT;
	struct d40_base *base = NULL;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	if (!plat_data) {
		if (np) {
			if (d40_of_probe(pdev, np)) {
				ret = -ENOMEM;
				goto failure;
			}
		} else {
			d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
			goto failure;
		}
	}
	base = d40_hw_detect_init(pdev);
	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
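	/*
	 * If firmware or an earlier boot stage already programmed
	 * D40_DREG_LCPA with a different, non-zero address, warn and keep
	 * the existing value; otherwise point the controller at our LCPA
	 * region.
	 */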
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}
	/* If LCLA has to be located in ESRAM we don't need to allocate */
	if (base->plat_data->use_esram_lcla) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "lcla_esram");
		if (!res) {
			ret = -ENOENT;
			d40_err(&pdev->dev,
				"No \"lcla_esram\" memory resource\n");
			goto failure;
		}
		base->lcla_pool.base = ioremap(res->start,
					       resource_size(res));
		if (!base->lcla_pool.base) {
			ret = -ENOMEM;
			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
			goto failure;
		}
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	} else {
		ret = d40_lcla_allocate(base);
		if (ret) {
			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
			goto failure;
		}
	}
	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);
	if (base->irq < 0) {
		ret = base->irq;
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to request IRQ\n");
		goto failure;
	}
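	/*
	 * The runtime PM callbacks may be invoked from interrupt context,
	 * so mark them IRQ safe, and let the controller autosuspend after
	 * DMA40_AUTOSUSPEND_DELAY ms of inactivity.
	 */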
	pm_runtime_irq_safe(base->dev);
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_enable(base->dev);
	pm_runtime_resume(base->dev);
	if (base->plat_data->use_esram_lcla) {

		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
		if (IS_ERR(base->lcpa_regulator)) {
			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
			base->lcpa_regulator = NULL;
			goto failure;
		}

		ret = regulator_enable(base->lcpa_regulator);
		if (ret) {
			d40_err(&pdev->dev,
				"Failed to enable lcpa_regulator\n");
			regulator_put(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}
	}
	base->initialized = true;
	ret = d40_dmaengine_init(base, num_reserved_chans);
	if (ret)
		goto failure;

	base->dev->dma_parms = &base->dma_parms;
	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
	if (ret) {
		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
		goto failure;
	}
	d40_hw_init(base);

	if (np) {
		err = of_dma_controller_register(np, d40_xlate, NULL);
		if (err && err != -ENODEV)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	dev_info(base->dev, "initialized\n");
	return 0;
failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
			iounmap(base->lcla_pool.base);
			base->lcla_pool.base = NULL;
		}

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable_unprepare(base->clk);
			clk_put(base->clk);
		}

		if (base->lcpa_regulator) {
			regulator_disable(base->lcpa_regulator);
			regulator_put(base->lcpa_regulator);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->reg_val_backup_chan);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}
static const struct of_device_id d40_match[] = {
	{ .compatible = "stericsson,dma40", },
	{}
};

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
		.pm = DMA40_PM_OPS,
		.of_match_table = d40_match,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);