pl330.c
  1. /*
  2. * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  3. * http://www.samsung.com
  4. *
  5. * Copyright (C) 2010 Samsung Electronics Co. Ltd.
  6. * Jaswinder Singh <jassi.brar@samsung.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. */
  13. #include <linux/kernel.h>
  14. #include <linux/io.h>
  15. #include <linux/init.h>
  16. #include <linux/slab.h>
  17. #include <linux/module.h>
  18. #include <linux/string.h>
  19. #include <linux/delay.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/dmaengine.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/amba/bus.h>
  25. #include <linux/amba/pl330.h>
  26. #include <linux/pm_runtime.h>
  27. #include <linux/scatterlist.h>
  28. #include <linux/of.h>
  29. #include "dmaengine.h"
  30. #define PL330_MAX_CHAN 8
  31. #define PL330_MAX_IRQS 32
  32. #define PL330_MAX_PERI 32
  33. enum pl330_srccachectrl {
  34. SCCTRL0, /* Noncacheable and nonbufferable */
  35. SCCTRL1, /* Bufferable only */
  36. SCCTRL2, /* Cacheable, but do not allocate */
  37. SCCTRL3, /* Cacheable and bufferable, but do not allocate */
  38. SINVALID1,
  39. SINVALID2,
  40. SCCTRL6, /* Cacheable write-through, allocate on reads only */
  41. SCCTRL7, /* Cacheable write-back, allocate on reads only */
  42. };
  43. enum pl330_dstcachectrl {
  44. DCCTRL0, /* Noncacheable and nonbufferable */
  45. DCCTRL1, /* Bufferable only */
  46. DCCTRL2, /* Cacheable, but do not allocate */
  47. DCCTRL3, /* Cacheable and bufferable, but do not allocate */
  48. DINVALID1, /* AWCACHE = 0x1000 */
  49. DINVALID2,
  50. DCCTRL6, /* Cacheable write-through, allocate on writes only */
  51. DCCTRL7, /* Cacheable write-back, allocate on writes only */
  52. };
  53. enum pl330_byteswap {
  54. SWAP_NO,
  55. SWAP_2,
  56. SWAP_4,
  57. SWAP_8,
  58. SWAP_16,
  59. };
  60. enum pl330_reqtype {
  61. MEMTOMEM,
  62. MEMTODEV,
  63. DEVTOMEM,
  64. DEVTODEV,
  65. };
  66. /* Register and Bit field Definitions */
  67. #define DS 0x0
  68. #define DS_ST_STOP 0x0
  69. #define DS_ST_EXEC 0x1
  70. #define DS_ST_CMISS 0x2
  71. #define DS_ST_UPDTPC 0x3
  72. #define DS_ST_WFE 0x4
  73. #define DS_ST_ATBRR 0x5
  74. #define DS_ST_QBUSY 0x6
  75. #define DS_ST_WFP 0x7
  76. #define DS_ST_KILL 0x8
  77. #define DS_ST_CMPLT 0x9
  78. #define DS_ST_FLTCMP 0xe
  79. #define DS_ST_FAULT 0xf
  80. #define DPC 0x4
  81. #define INTEN 0x20
  82. #define ES 0x24
  83. #define INTSTATUS 0x28
  84. #define INTCLR 0x2c
  85. #define FSM 0x30
  86. #define FSC 0x34
  87. #define FTM 0x38
  88. #define _FTC 0x40
  89. #define FTC(n) (_FTC + (n)*0x4)
  90. #define _CS 0x100
  91. #define CS(n) (_CS + (n)*0x8)
  92. #define CS_CNS (1 << 21)
  93. #define _CPC 0x104
  94. #define CPC(n) (_CPC + (n)*0x8)
  95. #define _SA 0x400
  96. #define SA(n) (_SA + (n)*0x20)
  97. #define _DA 0x404
  98. #define DA(n) (_DA + (n)*0x20)
  99. #define _CC 0x408
  100. #define CC(n) (_CC + (n)*0x20)
  101. #define CC_SRCINC (1 << 0)
  102. #define CC_DSTINC (1 << 14)
  103. #define CC_SRCPRI (1 << 8)
  104. #define CC_DSTPRI (1 << 22)
  105. #define CC_SRCNS (1 << 9)
  106. #define CC_DSTNS (1 << 23)
  107. #define CC_SRCIA (1 << 10)
  108. #define CC_DSTIA (1 << 24)
  109. #define CC_SRCBRSTLEN_SHFT 4
  110. #define CC_DSTBRSTLEN_SHFT 18
  111. #define CC_SRCBRSTSIZE_SHFT 1
  112. #define CC_DSTBRSTSIZE_SHFT 15
  113. #define CC_SRCCCTRL_SHFT 11
  114. #define CC_SRCCCTRL_MASK 0x7
  115. #define CC_DSTCCTRL_SHFT 25
  116. #define CC_DRCCCTRL_MASK 0x7
  117. #define CC_SWAP_SHFT 28
  118. #define _LC0 0x40c
  119. #define LC0(n) (_LC0 + (n)*0x20)
  120. #define _LC1 0x410
  121. #define LC1(n) (_LC1 + (n)*0x20)
  122. #define DBGSTATUS 0xd00
  123. #define DBG_BUSY (1 << 0)
  124. #define DBGCMD 0xd04
  125. #define DBGINST0 0xd08
  126. #define DBGINST1 0xd0c
  127. #define CR0 0xe00
  128. #define CR1 0xe04
  129. #define CR2 0xe08
  130. #define CR3 0xe0c
  131. #define CR4 0xe10
  132. #define CRD 0xe14
  133. #define PERIPH_ID 0xfe0
  134. #define PERIPH_REV_SHIFT 20
  135. #define PERIPH_REV_MASK 0xf
  136. #define PERIPH_REV_R0P0 0
  137. #define PERIPH_REV_R1P0 1
  138. #define PERIPH_REV_R1P1 2
  139. #define PCELL_ID 0xff0
  140. #define CR0_PERIPH_REQ_SET (1 << 0)
  141. #define CR0_BOOT_EN_SET (1 << 1)
  142. #define CR0_BOOT_MAN_NS (1 << 2)
  143. #define CR0_NUM_CHANS_SHIFT 4
  144. #define CR0_NUM_CHANS_MASK 0x7
  145. #define CR0_NUM_PERIPH_SHIFT 12
  146. #define CR0_NUM_PERIPH_MASK 0x1f
  147. #define CR0_NUM_EVENTS_SHIFT 17
  148. #define CR0_NUM_EVENTS_MASK 0x1f
  149. #define CR1_ICACHE_LEN_SHIFT 0
  150. #define CR1_ICACHE_LEN_MASK 0x7
  151. #define CR1_NUM_ICACHELINES_SHIFT 4
  152. #define CR1_NUM_ICACHELINES_MASK 0xf
  153. #define CRD_DATA_WIDTH_SHIFT 0
  154. #define CRD_DATA_WIDTH_MASK 0x7
  155. #define CRD_WR_CAP_SHIFT 4
  156. #define CRD_WR_CAP_MASK 0x7
  157. #define CRD_WR_Q_DEP_SHIFT 8
  158. #define CRD_WR_Q_DEP_MASK 0xf
  159. #define CRD_RD_CAP_SHIFT 12
  160. #define CRD_RD_CAP_MASK 0x7
  161. #define CRD_RD_Q_DEP_SHIFT 16
  162. #define CRD_RD_Q_DEP_MASK 0xf
  163. #define CRD_DATA_BUFF_SHIFT 20
  164. #define CRD_DATA_BUFF_MASK 0x3ff
  165. #define PART 0x330
  166. #define DESIGNER 0x41
  167. #define REVISION 0x0
  168. #define INTEG_CFG 0x0
  169. #define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
  170. #define PCELL_ID_VAL 0xb105f00d
  171. #define PL330_STATE_STOPPED (1 << 0)
  172. #define PL330_STATE_EXECUTING (1 << 1)
  173. #define PL330_STATE_WFE (1 << 2)
  174. #define PL330_STATE_FAULTING (1 << 3)
  175. #define PL330_STATE_COMPLETING (1 << 4)
  176. #define PL330_STATE_WFP (1 << 5)
  177. #define PL330_STATE_KILLING (1 << 6)
  178. #define PL330_STATE_FAULT_COMPLETING (1 << 7)
  179. #define PL330_STATE_CACHEMISS (1 << 8)
  180. #define PL330_STATE_UPDTPC (1 << 9)
  181. #define PL330_STATE_ATBARRIER (1 << 10)
  182. #define PL330_STATE_QUEUEBUSY (1 << 11)
  183. #define PL330_STATE_INVALID (1 << 15)
  184. #define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
  185. | PL330_STATE_WFE | PL330_STATE_FAULTING)
  186. #define CMD_DMAADDH 0x54
  187. #define CMD_DMAEND 0x00
  188. #define CMD_DMAFLUSHP 0x35
  189. #define CMD_DMAGO 0xa0
  190. #define CMD_DMALD 0x04
  191. #define CMD_DMALDP 0x25
  192. #define CMD_DMALP 0x20
  193. #define CMD_DMALPEND 0x28
  194. #define CMD_DMAKILL 0x01
  195. #define CMD_DMAMOV 0xbc
  196. #define CMD_DMANOP 0x18
  197. #define CMD_DMARMB 0x12
  198. #define CMD_DMASEV 0x34
  199. #define CMD_DMAST 0x08
  200. #define CMD_DMASTP 0x29
  201. #define CMD_DMASTZ 0x0c
  202. #define CMD_DMAWFE 0x36
  203. #define CMD_DMAWFP 0x30
  204. #define CMD_DMAWMB 0x13
  205. #define SZ_DMAADDH 3
  206. #define SZ_DMAEND 1
  207. #define SZ_DMAFLUSHP 2
  208. #define SZ_DMALD 1
  209. #define SZ_DMALDP 2
  210. #define SZ_DMALP 2
  211. #define SZ_DMALPEND 2
  212. #define SZ_DMAKILL 1
  213. #define SZ_DMAMOV 6
  214. #define SZ_DMANOP 1
  215. #define SZ_DMARMB 1
  216. #define SZ_DMASEV 2
  217. #define SZ_DMAST 1
  218. #define SZ_DMASTP 2
  219. #define SZ_DMASTZ 1
  220. #define SZ_DMAWFE 2
  221. #define SZ_DMAWFP 2
  222. #define SZ_DMAWMB 1
  223. #define SZ_DMAGO 6
  224. #define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
  225. #define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))
  226. #define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
  227. #define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
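/*
 * Worked example (illustrative, not part of the driver): for a CCR whose
 * source burst-size field is 2 (i.e. 1 << 2 = 4 bytes/beat) and whose
 * source burst-length field is 15 (i.e. 15 + 1 = 16 beats/burst):
 *   BRST_SIZE(ccr)           = 4
 *   BRST_LEN(ccr)            = 16
 *   BYTE_TO_BURST(4096, ccr) = 4096 / 4 / 16 = 64 bursts
 *   BURST_TO_BYTE(64, ccr)   = 64 * 4 * 16   = 4096 bytes
 */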
  228. /*
229. * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
230. * at 1 byte/burst for P<->M and M<->M respectively.
231. * For a typical scenario, at 1 word/burst, 10MB and 20MB xfers per req
232. * should be enough for P<->M and M<->M respectively.
  233. */
  234. #define MCODE_BUFF_PER_REQ 256
  235. /* If the _pl330_req is available to the client */
  236. #define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
  237. /* Use this _only_ to wait on transient states */
  238. #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
  239. #ifdef PL330_DEBUG_MCGEN
  240. static unsigned cmd_line;
  241. #define PL330_DBGCMD_DUMP(off, x...) do { \
  242. printk("%x:", cmd_line); \
  243. printk(x); \
  244. cmd_line += off; \
  245. } while (0)
  246. #define PL330_DBGMC_START(addr) (cmd_line = addr)
  247. #else
  248. #define PL330_DBGCMD_DUMP(off, x...) do {} while (0)
  249. #define PL330_DBGMC_START(addr) do {} while (0)
  250. #endif
  251. /* The number of default descriptors */
  252. #define NR_DEFAULT_DESC 16
  253. /* Populated by the PL330 core driver for DMA API driver's info */
  254. struct pl330_config {
  255. u32 periph_id;
  256. u32 pcell_id;
  257. #define DMAC_MODE_NS (1 << 0)
  258. unsigned int mode;
  259. unsigned int data_bus_width:10; /* In number of bits */
  260. unsigned int data_buf_dep:10;
  261. unsigned int num_chan:4;
  262. unsigned int num_peri:6;
  263. u32 peri_ns;
  264. unsigned int num_events:6;
  265. u32 irq_ns;
  266. };
  267. /* Handle to the DMAC provided to the PL330 core */
  268. struct pl330_info {
  269. /* Owning device */
  270. struct device *dev;
  271. /* Size of MicroCode buffers for each channel. */
  272. unsigned mcbufsz;
  273. /* ioremap'ed address of PL330 registers. */
  274. void __iomem *base;
  275. /* Client can freely use it. */
  276. void *client_data;
  277. /* PL330 core data, Client must not touch it. */
  278. void *pl330_data;
  279. /* Populated by the PL330 core driver during pl330_add */
  280. struct pl330_config pcfg;
  281. /*
282. * If the DMAC has some reset mechanism, then the
283. * client may want to provide a pointer to the method.
  284. */
  285. void (*dmac_reset)(struct pl330_info *pi);
  286. };
  287. /**
  288. * Request Configuration.
  289. * The PL330 core does not modify this and uses the last
  290. * working configuration if the request doesn't provide any.
  291. *
  292. * The Client may want to provide this info only for the
  293. * first request and a request with new settings.
  294. */
  295. struct pl330_reqcfg {
  296. /* Address Incrementing */
  297. unsigned dst_inc:1;
  298. unsigned src_inc:1;
  299. /*
  300. * For now, the SRC & DST protection levels
  301. * and burst size/length are assumed same.
  302. */
  303. bool nonsecure;
  304. bool privileged;
  305. bool insnaccess;
  306. unsigned brst_len:5;
  307. unsigned brst_size:3; /* in power of 2 */
  308. enum pl330_dstcachectrl dcctl;
  309. enum pl330_srccachectrl scctl;
  310. enum pl330_byteswap swap;
  311. struct pl330_config *pcfg;
  312. };
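/*
 * Hypothetical usage sketch (not part of the driver): how a client might
 * fill a pl330_reqcfg for a 4-byte-wide, 16-beat-burst, mem-to-dev setup.
 * The field names are those defined above; the function name and the
 * surrounding channel handling are assumptions.
 */
#if 0
static void example_fill_reqcfg(struct pl330_reqcfg *rqc,
				struct pl330_config *pcfg)
{
	rqc->src_inc = 1;		/* memory side increments */
	rqc->dst_inc = 0;		/* peripheral FIFO address stays fixed */
	rqc->nonsecure = false;
	rqc->privileged = false;
	rqc->insnaccess = false;
	rqc->brst_size = 2;		/* 1 << 2 = 4 bytes per beat */
	rqc->brst_len = 16;		/* 16 beats per burst */
	rqc->scctl = SCCTRL0;
	rqc->dcctl = DCCTRL0;
	rqc->swap = SWAP_NO;
	rqc->pcfg = pcfg;
}
#endif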
  313. /*
  314. * One cycle of DMAC operation.
  315. * There may be more than one xfer in a request.
  316. */
  317. struct pl330_xfer {
  318. u32 src_addr;
  319. u32 dst_addr;
  320. /* Size to xfer */
  321. u32 bytes;
  322. /*
  323. * Pointer to next xfer in the list.
  324. * The last xfer in the req must point to NULL.
  325. */
  326. struct pl330_xfer *next;
  327. };
  328. /* The xfer callbacks are made with one of these arguments. */
  329. enum pl330_op_err {
330. /* All xfers in the request were successful. */
331. PL330_ERR_NONE,
332. /* Req aborted due to a global error. */
333. PL330_ERR_ABORT,
334. /* Req failed due to a problem with the channel. */
  335. PL330_ERR_FAIL,
  336. };
  337. /* A request defining Scatter-Gather List ending with NULL xfer. */
  338. struct pl330_req {
  339. enum pl330_reqtype rqtype;
  340. /* Index of peripheral for the xfer. */
  341. unsigned peri:5;
  342. /* Unique token for this xfer, set by the client. */
  343. void *token;
  344. /* Callback to be called after xfer. */
  345. void (*xfer_cb)(void *token, enum pl330_op_err err);
346. /* If NULL, the req will be done with the last-set parameters. */
  347. struct pl330_reqcfg *cfg;
  348. /* Pointer to first xfer in the request. */
  349. struct pl330_xfer *x;
  350. };
  351. /*
  352. * To know the status of the channel and DMAC, the client
  353. * provides a pointer to this structure. The PL330 core
  354. * fills it with current information.
  355. */
  356. struct pl330_chanstatus {
  357. /*
358. * If the DMAC engine halted due to some error,
359. * the client should remove and re-add the DMAC.
  360. */
  361. bool dmac_halted;
  362. /*
  363. * If channel is halted due to some error,
  364. * the client should ABORT/FLUSH and START the channel.
  365. */
  366. bool faulting;
  367. /* Location of last load */
  368. u32 src_addr;
  369. /* Location of last store */
  370. u32 dst_addr;
  371. /*
  372. * Pointer to the currently active req, NULL if channel is
  373. * inactive, even though the requests may be present.
  374. */
  375. struct pl330_req *top_req;
  376. /* Pointer to req waiting second in the queue if any. */
  377. struct pl330_req *wait_req;
  378. };
  379. enum pl330_chan_op {
  380. /* Start the channel */
  381. PL330_OP_START,
  382. /* Abort the active xfer */
  383. PL330_OP_ABORT,
  384. /* Stop xfer and flush queue */
  385. PL330_OP_FLUSH,
  386. };
  387. struct _xfer_spec {
  388. u32 ccr;
  389. struct pl330_req *r;
  390. struct pl330_xfer *x;
  391. };
  392. enum dmamov_dst {
  393. SAR = 0,
  394. CCR,
  395. DAR,
  396. };
  397. enum pl330_dst {
  398. SRC = 0,
  399. DST,
  400. };
  401. enum pl330_cond {
  402. SINGLE,
  403. BURST,
  404. ALWAYS,
  405. };
  406. struct _pl330_req {
  407. u32 mc_bus;
  408. void *mc_cpu;
  409. /* Number of bytes taken to setup MC for the req */
  410. u32 mc_len;
  411. struct pl330_req *r;
  412. /* Hook to attach to DMAC's list of reqs with due callback */
  413. struct list_head rqd;
  414. };
  415. /* ToBeDone for tasklet */
  416. struct _pl330_tbd {
  417. bool reset_dmac;
  418. bool reset_mngr;
  419. u8 reset_chan;
  420. };
  421. /* A DMAC Thread */
  422. struct pl330_thread {
  423. u8 id;
  424. int ev;
  425. /* If the channel is not yet acquired by any client */
  426. bool free;
  427. /* Parent DMAC */
  428. struct pl330_dmac *dmac;
  429. /* Only two at a time */
  430. struct _pl330_req req[2];
  431. /* Index of the last enqueued request */
  432. unsigned lstenq;
  433. /* Index of the last submitted request or -1 if the DMA is stopped */
  434. int req_running;
  435. };
  436. enum pl330_dmac_state {
  437. UNINIT,
  438. INIT,
  439. DYING,
  440. };
  441. /* A DMAC */
  442. struct pl330_dmac {
  443. spinlock_t lock;
  444. /* Holds list of reqs with due callbacks */
  445. struct list_head req_done;
  446. /* Pointer to platform specific stuff */
  447. struct pl330_info *pinfo;
  448. /* Maximum possible events/irqs */
  449. int events[32];
  450. /* BUS address of MicroCode buffer */
  451. u32 mcode_bus;
  452. /* CPU address of MicroCode buffer */
  453. void *mcode_cpu;
  454. /* List of all Channel threads */
  455. struct pl330_thread *channels;
  456. /* Pointer to the MANAGER thread */
  457. struct pl330_thread *manager;
  458. /* To handle bad news in interrupt */
  459. struct tasklet_struct tasks;
  460. struct _pl330_tbd dmac_tbd;
  461. /* State of DMAC operation */
  462. enum pl330_dmac_state state;
  463. };
  464. enum desc_status {
  465. /* In the DMAC pool */
  466. FREE,
  467. /*
468. * Allocated to some channel during prep_xxx.
469. * May also be sitting on the work_list.
  470. */
  471. PREP,
  472. /*
  473. * Sitting on the work_list and already submitted
  474. * to the PL330 core. Not more than two descriptors
  475. * of a channel can be BUSY at any time.
  476. */
  477. BUSY,
  478. /*
  479. * Sitting on the channel work_list but xfer done
  480. * by PL330 core
  481. */
  482. DONE,
  483. };
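/*
 * Illustrative lifecycle implied by the states above: FREE (in the DMAC
 * pool) -> PREP (allocated during prep_xxx) -> BUSY (submitted to the
 * PL330 core) -> DONE (xfer finished) -> back to FREE when recycled.
 */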
  484. struct dma_pl330_chan {
  485. /* Schedule desc completion */
  486. struct tasklet_struct task;
  487. /* DMA-Engine Channel */
  488. struct dma_chan chan;
  489. /* List of to be xfered descriptors */
  490. struct list_head work_list;
  491. /* Pointer to the DMAC that manages this channel,
  492. * NULL if the channel is available to be acquired.
  493. * As the parent, this DMAC also provides descriptors
  494. * to the channel.
  495. */
  496. struct dma_pl330_dmac *dmac;
  497. /* To protect channel manipulation */
  498. spinlock_t lock;
  499. /* Token of a hardware channel thread of PL330 DMAC
  500. * NULL if the channel is available to be acquired.
  501. */
  502. void *pl330_chid;
  503. /* For D-to-M and M-to-D channels */
  504. int burst_sz; /* the peripheral fifo width */
505. int burst_len; /* the number of bursts */
  506. dma_addr_t fifo_addr;
  507. /* for cyclic capability */
  508. bool cyclic;
  509. };
  510. struct dma_pl330_dmac {
  511. struct pl330_info pif;
  512. /* DMA-Engine Device */
  513. struct dma_device ddma;
  514. /* Pool of descriptors available for the DMAC's channels */
  515. struct list_head desc_pool;
  516. /* To protect desc_pool manipulation */
  517. spinlock_t pool_lock;
  518. /* Peripheral channels connected to this DMAC */
  519. struct dma_pl330_chan *peripherals; /* keep at end */
  520. struct clk *clk;
  521. };
  522. struct dma_pl330_desc {
  523. /* To attach to a queue as child */
  524. struct list_head node;
  525. /* Descriptor for the DMA Engine API */
  526. struct dma_async_tx_descriptor txd;
  527. /* Xfer for PL330 core */
  528. struct pl330_xfer px;
  529. struct pl330_reqcfg rqcfg;
  530. struct pl330_req req;
  531. enum desc_status status;
  532. /* The channel which currently holds this desc */
  533. struct dma_pl330_chan *pchan;
  534. };
  535. static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
  536. {
  537. if (r && r->xfer_cb)
  538. r->xfer_cb(r->token, err);
  539. }
  540. static inline bool _queue_empty(struct pl330_thread *thrd)
  541. {
  542. return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
  543. ? true : false;
  544. }
  545. static inline bool _queue_full(struct pl330_thread *thrd)
  546. {
  547. return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
  548. ? false : true;
  549. }
  550. static inline bool is_manager(struct pl330_thread *thrd)
  551. {
  552. struct pl330_dmac *pl330 = thrd->dmac;
  553. /* MANAGER is indexed at the end */
  554. if (thrd->id == pl330->pinfo->pcfg.num_chan)
  555. return true;
  556. else
  557. return false;
  558. }
  559. /* If manager of the thread is in Non-Secure mode */
  560. static inline bool _manager_ns(struct pl330_thread *thrd)
  561. {
  562. struct pl330_dmac *pl330 = thrd->dmac;
  563. return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
  564. }
  565. static inline u32 get_id(struct pl330_info *pi, u32 off)
  566. {
  567. void __iomem *regs = pi->base;
  568. u32 id = 0;
  569. id |= (readb(regs + off + 0x0) << 0);
  570. id |= (readb(regs + off + 0x4) << 8);
  571. id |= (readb(regs + off + 0x8) << 16);
  572. id |= (readb(regs + off + 0xc) << 24);
  573. return id;
  574. }
  575. static inline u32 get_revision(u32 periph_id)
  576. {
  577. return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
  578. }
  579. static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
  580. enum pl330_dst da, u16 val)
  581. {
  582. if (dry_run)
  583. return SZ_DMAADDH;
  584. buf[0] = CMD_DMAADDH;
  585. buf[0] |= (da << 1);
  586. *((u16 *)&buf[1]) = val;
  587. PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
  588. da == 1 ? "DA" : "SA", val);
  589. return SZ_DMAADDH;
  590. }
  591. static inline u32 _emit_END(unsigned dry_run, u8 buf[])
  592. {
  593. if (dry_run)
  594. return SZ_DMAEND;
  595. buf[0] = CMD_DMAEND;
  596. PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
  597. return SZ_DMAEND;
  598. }
  599. static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
  600. {
  601. if (dry_run)
  602. return SZ_DMAFLUSHP;
  603. buf[0] = CMD_DMAFLUSHP;
  604. peri &= 0x1f;
  605. peri <<= 3;
  606. buf[1] = peri;
  607. PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
  608. return SZ_DMAFLUSHP;
  609. }
  610. static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
  611. {
  612. if (dry_run)
  613. return SZ_DMALD;
  614. buf[0] = CMD_DMALD;
  615. if (cond == SINGLE)
  616. buf[0] |= (0 << 1) | (1 << 0);
  617. else if (cond == BURST)
  618. buf[0] |= (1 << 1) | (1 << 0);
  619. PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
  620. cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
  621. return SZ_DMALD;
  622. }
  623. static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
  624. enum pl330_cond cond, u8 peri)
  625. {
  626. if (dry_run)
  627. return SZ_DMALDP;
  628. buf[0] = CMD_DMALDP;
  629. if (cond == BURST)
  630. buf[0] |= (1 << 1);
  631. peri &= 0x1f;
  632. peri <<= 3;
  633. buf[1] = peri;
  634. PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
  635. cond == SINGLE ? 'S' : 'B', peri >> 3);
  636. return SZ_DMALDP;
  637. }
  638. static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
  639. unsigned loop, u8 cnt)
  640. {
  641. if (dry_run)
  642. return SZ_DMALP;
  643. buf[0] = CMD_DMALP;
  644. if (loop)
  645. buf[0] |= (1 << 1);
  646. cnt--; /* DMAC increments by 1 internally */
  647. buf[1] = cnt;
  648. PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
  649. return SZ_DMALP;
  650. }
  651. struct _arg_LPEND {
  652. enum pl330_cond cond;
  653. bool forever;
  654. unsigned loop;
  655. u8 bjump;
  656. };
  657. static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
  658. const struct _arg_LPEND *arg)
  659. {
  660. enum pl330_cond cond = arg->cond;
  661. bool forever = arg->forever;
  662. unsigned loop = arg->loop;
  663. u8 bjump = arg->bjump;
  664. if (dry_run)
  665. return SZ_DMALPEND;
  666. buf[0] = CMD_DMALPEND;
  667. if (loop)
  668. buf[0] |= (1 << 2);
  669. if (!forever)
  670. buf[0] |= (1 << 4);
  671. if (cond == SINGLE)
  672. buf[0] |= (0 << 1) | (1 << 0);
  673. else if (cond == BURST)
  674. buf[0] |= (1 << 1) | (1 << 0);
  675. buf[1] = bjump;
  676. PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
  677. forever ? "FE" : "END",
  678. cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
  679. loop ? '1' : '0',
  680. bjump);
  681. return SZ_DMALPEND;
  682. }
  683. static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
  684. {
  685. if (dry_run)
  686. return SZ_DMAKILL;
  687. buf[0] = CMD_DMAKILL;
  688. return SZ_DMAKILL;
  689. }
  690. static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
  691. enum dmamov_dst dst, u32 val)
  692. {
  693. if (dry_run)
  694. return SZ_DMAMOV;
  695. buf[0] = CMD_DMAMOV;
  696. buf[1] = dst;
  697. *((u32 *)&buf[2]) = val;
  698. PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
  699. dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
  700. return SZ_DMAMOV;
  701. }
  702. static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
  703. {
  704. if (dry_run)
  705. return SZ_DMANOP;
  706. buf[0] = CMD_DMANOP;
  707. PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
  708. return SZ_DMANOP;
  709. }
  710. static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
  711. {
  712. if (dry_run)
  713. return SZ_DMARMB;
  714. buf[0] = CMD_DMARMB;
  715. PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
  716. return SZ_DMARMB;
  717. }
  718. static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
  719. {
  720. if (dry_run)
  721. return SZ_DMASEV;
  722. buf[0] = CMD_DMASEV;
  723. ev &= 0x1f;
  724. ev <<= 3;
  725. buf[1] = ev;
  726. PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
  727. return SZ_DMASEV;
  728. }
  729. static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
  730. {
  731. if (dry_run)
  732. return SZ_DMAST;
  733. buf[0] = CMD_DMAST;
  734. if (cond == SINGLE)
  735. buf[0] |= (0 << 1) | (1 << 0);
  736. else if (cond == BURST)
  737. buf[0] |= (1 << 1) | (1 << 0);
  738. PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
  739. cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
  740. return SZ_DMAST;
  741. }
  742. static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
  743. enum pl330_cond cond, u8 peri)
  744. {
  745. if (dry_run)
  746. return SZ_DMASTP;
  747. buf[0] = CMD_DMASTP;
  748. if (cond == BURST)
  749. buf[0] |= (1 << 1);
  750. peri &= 0x1f;
  751. peri <<= 3;
  752. buf[1] = peri;
  753. PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
  754. cond == SINGLE ? 'S' : 'B', peri >> 3);
  755. return SZ_DMASTP;
  756. }
  757. static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
  758. {
  759. if (dry_run)
  760. return SZ_DMASTZ;
  761. buf[0] = CMD_DMASTZ;
  762. PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
  763. return SZ_DMASTZ;
  764. }
  765. static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
  766. unsigned invalidate)
  767. {
  768. if (dry_run)
  769. return SZ_DMAWFE;
  770. buf[0] = CMD_DMAWFE;
  771. ev &= 0x1f;
  772. ev <<= 3;
  773. buf[1] = ev;
  774. if (invalidate)
  775. buf[1] |= (1 << 1);
  776. PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
  777. ev >> 3, invalidate ? ", I" : "");
  778. return SZ_DMAWFE;
  779. }
  780. static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
  781. enum pl330_cond cond, u8 peri)
  782. {
  783. if (dry_run)
  784. return SZ_DMAWFP;
  785. buf[0] = CMD_DMAWFP;
  786. if (cond == SINGLE)
  787. buf[0] |= (0 << 1) | (0 << 0);
  788. else if (cond == BURST)
  789. buf[0] |= (1 << 1) | (0 << 0);
  790. else
  791. buf[0] |= (0 << 1) | (1 << 0);
  792. peri &= 0x1f;
  793. peri <<= 3;
  794. buf[1] = peri;
  795. PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
  796. cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
  797. return SZ_DMAWFP;
  798. }
  799. static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
  800. {
  801. if (dry_run)
  802. return SZ_DMAWMB;
  803. buf[0] = CMD_DMAWMB;
  804. PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
  805. return SZ_DMAWMB;
  806. }
  807. struct _arg_GO {
  808. u8 chan;
  809. u32 addr;
  810. unsigned ns;
  811. };
  812. static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
  813. const struct _arg_GO *arg)
  814. {
  815. u8 chan = arg->chan;
  816. u32 addr = arg->addr;
  817. unsigned ns = arg->ns;
  818. if (dry_run)
  819. return SZ_DMAGO;
  820. buf[0] = CMD_DMAGO;
  821. buf[0] |= (ns << 1);
  822. buf[1] = chan & 0x7;
  823. *((u32 *)&buf[2]) = addr;
  824. return SZ_DMAGO;
  825. }
  826. #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
  827. /* Returns Time-Out */
  828. static bool _until_dmac_idle(struct pl330_thread *thrd)
  829. {
  830. void __iomem *regs = thrd->dmac->pinfo->base;
  831. unsigned long loops = msecs_to_loops(5);
  832. do {
  833. /* Until Manager is Idle */
  834. if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
  835. break;
  836. cpu_relax();
  837. } while (--loops);
  838. if (!loops)
  839. return true;
  840. return false;
  841. }
  842. static inline void _execute_DBGINSN(struct pl330_thread *thrd,
  843. u8 insn[], bool as_manager)
  844. {
  845. void __iomem *regs = thrd->dmac->pinfo->base;
  846. u32 val;
  847. val = (insn[0] << 16) | (insn[1] << 24);
  848. if (!as_manager) {
  849. val |= (1 << 0);
  850. val |= (thrd->id << 8); /* Channel Number */
  851. }
  852. writel(val, regs + DBGINST0);
  853. val = *((u32 *)&insn[2]);
  854. writel(val, regs + DBGINST1);
  855. /* If timed out due to halted state-machine */
  856. if (_until_dmac_idle(thrd)) {
  857. dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
  858. return;
  859. }
  860. /* Get going */
  861. writel(0, regs + DBGCMD);
  862. }
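/*
 * Worked example (illustrative): issuing DMAKILL (0x01) on channel thread 3
 * through the function above packs the debug registers as
 *   DBGINST0 = (insn[0] << 16) | (1 << 0) | (3 << 8) = 0x00010301
 *   DBGINST1 = insn[2..5]                            = 0x00000000
 * and the subsequent write of 0 to DBGCMD executes the instruction.
 */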
  863. /*
  864. * Mark a _pl330_req as free.
  865. * We do it by writing DMAEND as the first instruction
  866. * because no valid request is going to have DMAEND as
  867. * its first instruction to execute.
  868. */
  869. static void mark_free(struct pl330_thread *thrd, int idx)
  870. {
  871. struct _pl330_req *req = &thrd->req[idx];
  872. _emit_END(0, req->mc_cpu);
  873. req->mc_len = 0;
  874. thrd->req_running = -1;
  875. }
  876. static inline u32 _state(struct pl330_thread *thrd)
  877. {
  878. void __iomem *regs = thrd->dmac->pinfo->base;
  879. u32 val;
  880. if (is_manager(thrd))
  881. val = readl(regs + DS) & 0xf;
  882. else
  883. val = readl(regs + CS(thrd->id)) & 0xf;
  884. switch (val) {
  885. case DS_ST_STOP:
  886. return PL330_STATE_STOPPED;
  887. case DS_ST_EXEC:
  888. return PL330_STATE_EXECUTING;
  889. case DS_ST_CMISS:
  890. return PL330_STATE_CACHEMISS;
  891. case DS_ST_UPDTPC:
  892. return PL330_STATE_UPDTPC;
  893. case DS_ST_WFE:
  894. return PL330_STATE_WFE;
  895. case DS_ST_FAULT:
  896. return PL330_STATE_FAULTING;
  897. case DS_ST_ATBRR:
  898. if (is_manager(thrd))
  899. return PL330_STATE_INVALID;
  900. else
  901. return PL330_STATE_ATBARRIER;
  902. case DS_ST_QBUSY:
  903. if (is_manager(thrd))
  904. return PL330_STATE_INVALID;
  905. else
  906. return PL330_STATE_QUEUEBUSY;
  907. case DS_ST_WFP:
  908. if (is_manager(thrd))
  909. return PL330_STATE_INVALID;
  910. else
  911. return PL330_STATE_WFP;
  912. case DS_ST_KILL:
  913. if (is_manager(thrd))
  914. return PL330_STATE_INVALID;
  915. else
  916. return PL330_STATE_KILLING;
  917. case DS_ST_CMPLT:
  918. if (is_manager(thrd))
  919. return PL330_STATE_INVALID;
  920. else
  921. return PL330_STATE_COMPLETING;
  922. case DS_ST_FLTCMP:
  923. if (is_manager(thrd))
  924. return PL330_STATE_INVALID;
  925. else
  926. return PL330_STATE_FAULT_COMPLETING;
  927. default:
  928. return PL330_STATE_INVALID;
  929. }
  930. }
  931. static void _stop(struct pl330_thread *thrd)
  932. {
  933. void __iomem *regs = thrd->dmac->pinfo->base;
  934. u8 insn[6] = {0, 0, 0, 0, 0, 0};
  935. if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
  936. UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
  937. /* Return if nothing needs to be done */
  938. if (_state(thrd) == PL330_STATE_COMPLETING
  939. || _state(thrd) == PL330_STATE_KILLING
  940. || _state(thrd) == PL330_STATE_STOPPED)
  941. return;
  942. _emit_KILL(0, insn);
  943. /* Stop generating interrupts for SEV */
  944. writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
  945. _execute_DBGINSN(thrd, insn, is_manager(thrd));
  946. }
  947. /* Start doing req 'idx' of thread 'thrd' */
  948. static bool _trigger(struct pl330_thread *thrd)
  949. {
  950. void __iomem *regs = thrd->dmac->pinfo->base;
  951. struct _pl330_req *req;
  952. struct pl330_req *r;
  953. struct _arg_GO go;
  954. unsigned ns;
  955. u8 insn[6] = {0, 0, 0, 0, 0, 0};
  956. int idx;
  957. /* Return if already ACTIVE */
  958. if (_state(thrd) != PL330_STATE_STOPPED)
  959. return true;
  960. idx = 1 - thrd->lstenq;
  961. if (!IS_FREE(&thrd->req[idx]))
  962. req = &thrd->req[idx];
  963. else {
  964. idx = thrd->lstenq;
  965. if (!IS_FREE(&thrd->req[idx]))
  966. req = &thrd->req[idx];
  967. else
  968. req = NULL;
  969. }
  970. /* Return if no request */
  971. if (!req || !req->r)
  972. return true;
  973. r = req->r;
  974. if (r->cfg)
  975. ns = r->cfg->nonsecure ? 1 : 0;
  976. else if (readl(regs + CS(thrd->id)) & CS_CNS)
  977. ns = 1;
  978. else
  979. ns = 0;
  980. /* See 'Abort Sources' point-4 at Page 2-25 */
  981. if (_manager_ns(thrd) && !ns)
  982. dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
  983. __func__, __LINE__);
  984. go.chan = thrd->id;
  985. go.addr = req->mc_bus;
  986. go.ns = ns;
  987. _emit_GO(0, insn, &go);
  988. /* Set to generate interrupts for SEV */
  989. writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);
  990. /* Only manager can execute GO */
  991. _execute_DBGINSN(thrd, insn, true);
  992. thrd->req_running = idx;
  993. return true;
  994. }
  995. static bool _start(struct pl330_thread *thrd)
  996. {
  997. switch (_state(thrd)) {
  998. case PL330_STATE_FAULT_COMPLETING:
  999. UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
  1000. if (_state(thrd) == PL330_STATE_KILLING)
  1001. UNTIL(thrd, PL330_STATE_STOPPED)
  1002. case PL330_STATE_FAULTING:
  1003. _stop(thrd);
  1004. case PL330_STATE_KILLING:
  1005. case PL330_STATE_COMPLETING:
  1006. UNTIL(thrd, PL330_STATE_STOPPED)
  1007. case PL330_STATE_STOPPED:
  1008. return _trigger(thrd);
  1009. case PL330_STATE_WFP:
  1010. case PL330_STATE_QUEUEBUSY:
  1011. case PL330_STATE_ATBARRIER:
  1012. case PL330_STATE_UPDTPC:
  1013. case PL330_STATE_CACHEMISS:
  1014. case PL330_STATE_EXECUTING:
  1015. return true;
  1016. case PL330_STATE_WFE: /* For RESUME, nothing yet */
  1017. default:
  1018. return false;
  1019. }
  1020. }
  1021. static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
  1022. const struct _xfer_spec *pxs, int cyc)
  1023. {
  1024. int off = 0;
  1025. struct pl330_config *pcfg = pxs->r->cfg->pcfg;
  1026. /* check lock-up free version */
  1027. if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
  1028. while (cyc--) {
  1029. off += _emit_LD(dry_run, &buf[off], ALWAYS);
  1030. off += _emit_ST(dry_run, &buf[off], ALWAYS);
  1031. }
  1032. } else {
  1033. while (cyc--) {
  1034. off += _emit_LD(dry_run, &buf[off], ALWAYS);
  1035. off += _emit_RMB(dry_run, &buf[off]);
  1036. off += _emit_ST(dry_run, &buf[off], ALWAYS);
  1037. off += _emit_WMB(dry_run, &buf[off]);
  1038. }
  1039. }
  1040. return off;
  1041. }
  1042. static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
  1043. const struct _xfer_spec *pxs, int cyc)
  1044. {
  1045. int off = 0;
  1046. while (cyc--) {
  1047. off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
  1048. off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
  1049. off += _emit_ST(dry_run, &buf[off], ALWAYS);
  1050. off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
  1051. }
  1052. return off;
  1053. }
  1054. static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
  1055. const struct _xfer_spec *pxs, int cyc)
  1056. {
  1057. int off = 0;
  1058. while (cyc--) {
  1059. off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
  1060. off += _emit_LD(dry_run, &buf[off], ALWAYS);
  1061. off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
  1062. off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
  1063. }
  1064. return off;
  1065. }
  1066. static int _bursts(unsigned dry_run, u8 buf[],
  1067. const struct _xfer_spec *pxs, int cyc)
  1068. {
  1069. int off = 0;
  1070. switch (pxs->r->rqtype) {
  1071. case MEMTODEV:
  1072. off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
  1073. break;
  1074. case DEVTOMEM:
  1075. off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
  1076. break;
  1077. case MEMTOMEM:
  1078. off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
  1079. break;
  1080. default:
  1081. off += 0x40000000; /* Scare off the Client */
  1082. break;
  1083. }
  1084. return off;
  1085. }
  1086. /* Returns bytes consumed and updates bursts */
  1087. static inline int _loop(unsigned dry_run, u8 buf[],
  1088. unsigned long *bursts, const struct _xfer_spec *pxs)
  1089. {
  1090. int cyc, cycmax, szlp, szlpend, szbrst, off;
  1091. unsigned lcnt0, lcnt1, ljmp0, ljmp1;
  1092. struct _arg_LPEND lpend;
  1093. /* Max iterations possible in DMALP is 256 */
  1094. if (*bursts >= 256*256) {
  1095. lcnt1 = 256;
  1096. lcnt0 = 256;
  1097. cyc = *bursts / lcnt1 / lcnt0;
  1098. } else if (*bursts > 256) {
  1099. lcnt1 = 256;
  1100. lcnt0 = *bursts / lcnt1;
  1101. cyc = 1;
  1102. } else {
  1103. lcnt1 = *bursts;
  1104. lcnt0 = 0;
  1105. cyc = 1;
  1106. }
  1107. szlp = _emit_LP(1, buf, 0, 0);
  1108. szbrst = _bursts(1, buf, pxs, 1);
  1109. lpend.cond = ALWAYS;
  1110. lpend.forever = false;
  1111. lpend.loop = 0;
  1112. lpend.bjump = 0;
  1113. szlpend = _emit_LPEND(1, buf, &lpend);
  1114. if (lcnt0) {
  1115. szlp *= 2;
  1116. szlpend *= 2;
  1117. }
  1118. /*
1119. * Max bursts that we can unroll due to the limit on the
1120. * size of the backward jump that can be encoded in DMALPEND,
1121. * which is 8 bits and hence 255.
  1122. */
  1123. cycmax = (255 - (szlp + szlpend)) / szbrst;
  1124. cyc = (cycmax < cyc) ? cycmax : cyc;
  1125. off = 0;
  1126. if (lcnt0) {
  1127. off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
  1128. ljmp0 = off;
  1129. }
  1130. off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
  1131. ljmp1 = off;
  1132. off += _bursts(dry_run, &buf[off], pxs, cyc);
  1133. lpend.cond = ALWAYS;
  1134. lpend.forever = false;
  1135. lpend.loop = 1;
  1136. lpend.bjump = off - ljmp1;
  1137. off += _emit_LPEND(dry_run, &buf[off], &lpend);
  1138. if (lcnt0) {
  1139. lpend.cond = ALWAYS;
  1140. lpend.forever = false;
  1141. lpend.loop = 0;
  1142. lpend.bjump = off - ljmp0;
  1143. off += _emit_LPEND(dry_run, &buf[off], &lpend);
  1144. }
  1145. *bursts = lcnt1 * cyc;
  1146. if (lcnt0)
  1147. *bursts *= lcnt0;
  1148. return off;
  1149. }
  1150. static inline int _setup_loops(unsigned dry_run, u8 buf[],
  1151. const struct _xfer_spec *pxs)
  1152. {
  1153. struct pl330_xfer *x = pxs->x;
  1154. u32 ccr = pxs->ccr;
  1155. unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
  1156. int off = 0;
  1157. while (bursts) {
  1158. c = bursts;
  1159. off += _loop(dry_run, &buf[off], &c, pxs);
  1160. bursts -= c;
  1161. }
  1162. return off;
  1163. }
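/*
 * Worked example (illustrative): with 70000 bursts to go, the first _loop()
 * pass takes the *bursts >= 256*256 branch (lcnt0 = lcnt1 = 256, cyc = 1,
 * assuming cycmax does not clamp it) and consumes 65536 bursts; _setup_loops()
 * then calls _loop() again for the remaining 4464 bursts, which fall into the
 * *bursts > 256 branch (lcnt1 = 256, lcnt0 = 17), consuming 4352, and finally
 * once more for the last 112, until none remain.
 */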
  1164. static inline int _setup_xfer(unsigned dry_run, u8 buf[],
  1165. const struct _xfer_spec *pxs)
  1166. {
  1167. struct pl330_xfer *x = pxs->x;
  1168. int off = 0;
  1169. /* DMAMOV SAR, x->src_addr */
  1170. off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
  1171. /* DMAMOV DAR, x->dst_addr */
  1172. off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
  1173. /* Setup Loop(s) */
  1174. off += _setup_loops(dry_run, &buf[off], pxs);
  1175. return off;
  1176. }
  1177. /*
  1178. * A req is a sequence of one or more xfer units.
  1179. * Returns the number of bytes taken to setup the MC for the req.
  1180. */
  1181. static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
  1182. unsigned index, struct _xfer_spec *pxs)
  1183. {
  1184. struct _pl330_req *req = &thrd->req[index];
  1185. struct pl330_xfer *x;
  1186. u8 *buf = req->mc_cpu;
  1187. int off = 0;
  1188. PL330_DBGMC_START(req->mc_bus);
  1189. /* DMAMOV CCR, ccr */
  1190. off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
  1191. x = pxs->r->x;
  1192. do {
  1193. /* Error if xfer length is not aligned at burst size */
  1194. if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
  1195. return -EINVAL;
  1196. pxs->x = x;
  1197. off += _setup_xfer(dry_run, &buf[off], pxs);
  1198. x = x->next;
  1199. } while (x);
  1200. /* DMASEV peripheral/event */
  1201. off += _emit_SEV(dry_run, &buf[off], thrd->ev);
  1202. /* DMAEND */
  1203. off += _emit_END(dry_run, &buf[off]);
  1204. return off;
  1205. }
  1206. static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
  1207. {
  1208. u32 ccr = 0;
  1209. if (rqc->src_inc)
  1210. ccr |= CC_SRCINC;
  1211. if (rqc->dst_inc)
  1212. ccr |= CC_DSTINC;
1213. /* We set the same protection levels for SRC and DST for now */
  1214. if (rqc->privileged)
  1215. ccr |= CC_SRCPRI | CC_DSTPRI;
  1216. if (rqc->nonsecure)
  1217. ccr |= CC_SRCNS | CC_DSTNS;
  1218. if (rqc->insnaccess)
  1219. ccr |= CC_SRCIA | CC_DSTIA;
  1220. ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
  1221. ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);
  1222. ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
  1223. ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
  1224. ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
  1225. ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
  1226. ccr |= (rqc->swap << CC_SWAP_SHFT);
  1227. return ccr;
  1228. }
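/*
 * Worked example (illustrative): for src_inc = dst_inc = 1, brst_len = 16,
 * brst_size = 2 (4 bytes), default cache control and no swap, the function
 * above yields
 *   ccr = CC_SRCINC | CC_DSTINC
 *       | (15 << CC_SRCBRSTLEN_SHFT)  | (15 << CC_DSTBRSTLEN_SHFT)
 *       | (2 << CC_SRCBRSTSIZE_SHFT)  | (2 << CC_DSTBRSTSIZE_SHFT)
 *       = 0x003d40f5
 */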
  1229. static inline bool _is_valid(u32 ccr)
  1230. {
  1231. enum pl330_dstcachectrl dcctl;
  1232. enum pl330_srccachectrl scctl;
  1233. dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
  1234. scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
  1235. if (dcctl == DINVALID1 || dcctl == DINVALID2
  1236. || scctl == SINVALID1 || scctl == SINVALID2)
  1237. return false;
  1238. else
  1239. return true;
  1240. }
  1241. /*
  1242. * Submit a list of xfers after which the client wants notification.
  1243. * Client is not notified after each xfer unit, just once after all
  1244. * xfer units are done or some error occurs.
  1245. */
  1246. static int pl330_submit_req(void *ch_id, struct pl330_req *r)
  1247. {
  1248. struct pl330_thread *thrd = ch_id;
  1249. struct pl330_dmac *pl330;
  1250. struct pl330_info *pi;
  1251. struct _xfer_spec xs;
  1252. unsigned long flags;
  1253. void __iomem *regs;
  1254. unsigned idx;
  1255. u32 ccr;
  1256. int ret = 0;
  1257. /* No Req or Unacquired Channel or DMAC */
  1258. if (!r || !thrd || thrd->free)
  1259. return -EINVAL;
  1260. pl330 = thrd->dmac;
  1261. pi = pl330->pinfo;
  1262. regs = pi->base;
  1263. if (pl330->state == DYING
  1264. || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
  1265. dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
  1266. __func__, __LINE__);
  1267. return -EAGAIN;
  1268. }
  1269. /* If request for non-existing peripheral */
  1270. if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
  1271. dev_info(thrd->dmac->pinfo->dev,
  1272. "%s:%d Invalid peripheral(%u)!\n",
  1273. __func__, __LINE__, r->peri);
  1274. return -EINVAL;
  1275. }
  1276. spin_lock_irqsave(&pl330->lock, flags);
  1277. if (_queue_full(thrd)) {
  1278. ret = -EAGAIN;
  1279. goto xfer_exit;
  1280. }
  1281. /* Prefer Secure Channel */
  1282. if (!_manager_ns(thrd))
  1283. r->cfg->nonsecure = 0;
  1284. else
  1285. r->cfg->nonsecure = 1;
  1286. /* Use last settings, if not provided */
  1287. if (r->cfg)
  1288. ccr = _prepare_ccr(r->cfg);
  1289. else
  1290. ccr = readl(regs + CC(thrd->id));
  1291. /* If this req doesn't have valid xfer settings */
  1292. if (!_is_valid(ccr)) {
  1293. ret = -EINVAL;
  1294. dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
  1295. __func__, __LINE__, ccr);
  1296. goto xfer_exit;
  1297. }
  1298. idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
  1299. xs.ccr = ccr;
  1300. xs.r = r;
  1301. /* First dry run to check if req is acceptable */
  1302. ret = _setup_req(1, thrd, idx, &xs);
  1303. if (ret < 0)
  1304. goto xfer_exit;
  1305. if (ret > pi->mcbufsz / 2) {
  1306. dev_info(thrd->dmac->pinfo->dev,
  1307. "%s:%d Trying increasing mcbufsz\n",
  1308. __func__, __LINE__);
  1309. ret = -ENOMEM;
  1310. goto xfer_exit;
  1311. }
  1312. /* Hook the request */
  1313. thrd->lstenq = idx;
  1314. thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
  1315. thrd->req[idx].r = r;
  1316. ret = 0;
  1317. xfer_exit:
  1318. spin_unlock_irqrestore(&pl330->lock, flags);
  1319. return ret;
  1320. }
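/*
 * Hypothetical client-side sketch (not part of the driver): submitting a
 * single mem-to-dev xfer on an already-acquired channel token 'ch_id'.
 * example_xfer_done(), example_submit(), 'ch_id' and the reqcfg contents
 * are assumptions; only pl330_submit_req() and the structs above are real.
 */
#if 0
static void example_xfer_done(void *token, enum pl330_op_err err)
{
	/* token identifies the request; err is PL330_ERR_NONE on success */
}

static int example_submit(void *ch_id, struct pl330_reqcfg *rqc,
			  u32 src, u32 dst, u32 bytes)
{
	/* static: the DMAC consumes these asynchronously (sketch only) */
	static struct pl330_xfer x;
	static struct pl330_req r;

	x.src_addr = src;
	x.dst_addr = dst;
	x.bytes = bytes;	/* must be a multiple of burst size * length */
	x.next = NULL;		/* last (and only) xfer in the list */

	r.rqtype = MEMTODEV;
	r.peri = 0;		/* peripheral index, per board wiring */
	r.token = &r;
	r.xfer_cb = example_xfer_done;
	r.cfg = rqc;
	r.x = &x;

	return pl330_submit_req(ch_id, &r);
}
#endif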
  1321. static void pl330_dotask(unsigned long data)
  1322. {
  1323. struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
  1324. struct pl330_info *pi = pl330->pinfo;
  1325. unsigned long flags;
  1326. int i;
  1327. spin_lock_irqsave(&pl330->lock, flags);
1328. /* The DMAC itself has gone nuts */
  1329. if (pl330->dmac_tbd.reset_dmac) {
  1330. pl330->state = DYING;
  1331. /* Reset the manager too */
  1332. pl330->dmac_tbd.reset_mngr = true;
  1333. /* Clear the reset flag */
  1334. pl330->dmac_tbd.reset_dmac = false;
  1335. }
  1336. if (pl330->dmac_tbd.reset_mngr) {
  1337. _stop(pl330->manager);
  1338. /* Reset all channels */
  1339. pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
  1340. /* Clear the reset flag */
  1341. pl330->dmac_tbd.reset_mngr = false;
  1342. }
  1343. for (i = 0; i < pi->pcfg.num_chan; i++) {
  1344. if (pl330->dmac_tbd.reset_chan & (1 << i)) {
  1345. struct pl330_thread *thrd = &pl330->channels[i];
  1346. void __iomem *regs = pi->base;
  1347. enum pl330_op_err err;
  1348. _stop(thrd);
  1349. if (readl(regs + FSC) & (1 << thrd->id))
  1350. err = PL330_ERR_FAIL;
  1351. else
  1352. err = PL330_ERR_ABORT;
  1353. spin_unlock_irqrestore(&pl330->lock, flags);
  1354. _callback(thrd->req[1 - thrd->lstenq].r, err);
  1355. _callback(thrd->req[thrd->lstenq].r, err);
  1356. spin_lock_irqsave(&pl330->lock, flags);
  1357. thrd->req[0].r = NULL;
  1358. thrd->req[1].r = NULL;
  1359. mark_free(thrd, 0);
  1360. mark_free(thrd, 1);
  1361. /* Clear the reset flag */
  1362. pl330->dmac_tbd.reset_chan &= ~(1 << i);
  1363. }
  1364. }
  1365. spin_unlock_irqrestore(&pl330->lock, flags);
  1366. return;
  1367. }
  1368. /* Returns 1 if state was updated, 0 otherwise */
  1369. static int pl330_update(const struct pl330_info *pi)
  1370. {
  1371. struct _pl330_req *rqdone;
  1372. struct pl330_dmac *pl330;
  1373. unsigned long flags;
  1374. void __iomem *regs;
  1375. u32 val;
  1376. int id, ev, ret = 0;
  1377. if (!pi || !pi->pl330_data)
  1378. return 0;
  1379. regs = pi->base;
  1380. pl330 = pi->pl330_data;
  1381. spin_lock_irqsave(&pl330->lock, flags);
  1382. val = readl(regs + FSM) & 0x1;
  1383. if (val)
  1384. pl330->dmac_tbd.reset_mngr = true;
  1385. else
  1386. pl330->dmac_tbd.reset_mngr = false;
  1387. val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
  1388. pl330->dmac_tbd.reset_chan |= val;
  1389. if (val) {
  1390. int i = 0;
  1391. while (i < pi->pcfg.num_chan) {
  1392. if (val & (1 << i)) {
  1393. dev_info(pi->dev,
  1394. "Reset Channel-%d\t CS-%x FTC-%x\n",
  1395. i, readl(regs + CS(i)),
  1396. readl(regs + FTC(i)));
  1397. _stop(&pl330->channels[i]);
  1398. }
  1399. i++;
  1400. }
  1401. }
1402. /* Check which event occurred, i.e., which thread notified completion */
  1403. val = readl(regs + ES);
  1404. if (pi->pcfg.num_events < 32
  1405. && val & ~((1 << pi->pcfg.num_events) - 1)) {
  1406. pl330->dmac_tbd.reset_dmac = true;
  1407. dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
  1408. ret = 1;
  1409. goto updt_exit;
  1410. }
  1411. for (ev = 0; ev < pi->pcfg.num_events; ev++) {
  1412. if (val & (1 << ev)) { /* Event occurred */
  1413. struct pl330_thread *thrd;
  1414. u32 inten = readl(regs + INTEN);
  1415. int active;
  1416. /* Clear the event */
  1417. if (inten & (1 << ev))
  1418. writel(1 << ev, regs + INTCLR);
  1419. ret = 1;
  1420. id = pl330->events[ev];
  1421. thrd = &pl330->channels[id];
  1422. active = thrd->req_running;
  1423. if (active == -1) /* Aborted */
  1424. continue;
  1425. rqdone = &thrd->req[active];
  1426. mark_free(thrd, active);
  1427. /* Get going again ASAP */
  1428. _start(thrd);
  1429. /* For now, just make a list of callbacks to be done */
  1430. list_add_tail(&rqdone->rqd, &pl330->req_done);
  1431. }
  1432. }
  1433. /* Now that we are in no hurry, do the callbacks */
  1434. while (!list_empty(&pl330->req_done)) {
  1435. struct pl330_req *r;
  1436. rqdone = container_of(pl330->req_done.next,
  1437. struct _pl330_req, rqd);
  1438. list_del_init(&rqdone->rqd);
  1439. /* Detach the req */
  1440. r = rqdone->r;
  1441. rqdone->r = NULL;
  1442. spin_unlock_irqrestore(&pl330->lock, flags);
  1443. _callback(r, PL330_ERR_NONE);
  1444. spin_lock_irqsave(&pl330->lock, flags);
  1445. }
  1446. updt_exit:
  1447. spin_unlock_irqrestore(&pl330->lock, flags);
  1448. if (pl330->dmac_tbd.reset_dmac
  1449. || pl330->dmac_tbd.reset_mngr
  1450. || pl330->dmac_tbd.reset_chan) {
  1451. ret = 1;
  1452. tasklet_schedule(&pl330->tasks);
  1453. }
  1454. return ret;
  1455. }
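/*
 * Channel control entry point: FLUSH stops the thread and discards both
 * queued requests, ABORT stops the thread and discards only the request that
 * was executing, and START (also reached after ABORT) kicks the thread if
 * nothing is currently running.
 */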
  1456. static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
  1457. {
  1458. struct pl330_thread *thrd = ch_id;
  1459. struct pl330_dmac *pl330;
  1460. unsigned long flags;
  1461. int ret = 0, active;
  1462. if (!thrd || thrd->free || thrd->dmac->state == DYING)
  1463. return -EINVAL;
  1464. pl330 = thrd->dmac;
  1465. active = thrd->req_running;
  1466. spin_lock_irqsave(&pl330->lock, flags);
  1467. switch (op) {
  1468. case PL330_OP_FLUSH:
  1469. /* Make sure the channel is stopped */
  1470. _stop(thrd);
  1471. thrd->req[0].r = NULL;
  1472. thrd->req[1].r = NULL;
  1473. mark_free(thrd, 0);
  1474. mark_free(thrd, 1);
  1475. break;
  1476. case PL330_OP_ABORT:
  1477. /* Make sure the channel is stopped */
  1478. _stop(thrd);
  1479. /* ABORT is only for the active req */
  1480. if (active == -1)
  1481. break;
  1482. thrd->req[active].r = NULL;
  1483. mark_free(thrd, active);
1484. /* Fall through and start the next request */
  1485. case PL330_OP_START:
  1486. if ((active == -1) && !_start(thrd))
  1487. ret = -EIO;
  1488. break;
  1489. default:
  1490. ret = -EINVAL;
  1491. }
  1492. spin_unlock_irqrestore(&pl330->lock, flags);
  1493. return ret;
  1494. }
  1495. /* Reserve an event */
  1496. static inline int _alloc_event(struct pl330_thread *thrd)
  1497. {
  1498. struct pl330_dmac *pl330 = thrd->dmac;
  1499. struct pl330_info *pi = pl330->pinfo;
  1500. int ev;
  1501. for (ev = 0; ev < pi->pcfg.num_events; ev++)
  1502. if (pl330->events[ev] == -1) {
  1503. pl330->events[ev] = thrd->id;
  1504. return ev;
  1505. }
  1506. return -1;
  1507. }
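/* True if the channel's interrupt/event is routed as non-secure (from CR3) */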
  1508. static bool _chan_ns(const struct pl330_info *pi, int i)
  1509. {
  1510. return pi->pcfg.irq_ns & (1 << i);
  1511. }
  1512. /* Upon success, returns IdentityToken for the
  1513. * allocated channel, NULL otherwise.
  1514. */
  1515. static void *pl330_request_channel(const struct pl330_info *pi)
  1516. {
  1517. struct pl330_thread *thrd = NULL;
  1518. struct pl330_dmac *pl330;
  1519. unsigned long flags;
  1520. int chans, i;
  1521. if (!pi || !pi->pl330_data)
  1522. return NULL;
  1523. pl330 = pi->pl330_data;
  1524. if (pl330->state == DYING)
  1525. return NULL;
  1526. chans = pi->pcfg.num_chan;
  1527. spin_lock_irqsave(&pl330->lock, flags);
  1528. for (i = 0; i < chans; i++) {
  1529. thrd = &pl330->channels[i];
  1530. if ((thrd->free) && (!_manager_ns(thrd) ||
  1531. _chan_ns(pi, i))) {
  1532. thrd->ev = _alloc_event(thrd);
  1533. if (thrd->ev >= 0) {
  1534. thrd->free = false;
  1535. thrd->lstenq = 1;
  1536. thrd->req[0].r = NULL;
  1537. mark_free(thrd, 0);
  1538. thrd->req[1].r = NULL;
  1539. mark_free(thrd, 1);
  1540. break;
  1541. }
  1542. }
  1543. thrd = NULL;
  1544. }
  1545. spin_unlock_irqrestore(&pl330->lock, flags);
  1546. return thrd;
  1547. }
  1548. /* Release an event */
  1549. static inline void _free_event(struct pl330_thread *thrd, int ev)
  1550. {
  1551. struct pl330_dmac *pl330 = thrd->dmac;
  1552. struct pl330_info *pi = pl330->pinfo;
  1553. /* If the event is valid and was held by the thread */
  1554. if (ev >= 0 && ev < pi->pcfg.num_events
  1555. && pl330->events[ev] == thrd->id)
  1556. pl330->events[ev] = -1;
  1557. }
  1558. static void pl330_release_channel(void *ch_id)
  1559. {
  1560. struct pl330_thread *thrd = ch_id;
  1561. struct pl330_dmac *pl330;
  1562. unsigned long flags;
  1563. if (!thrd || thrd->free)
  1564. return;
  1565. _stop(thrd);
  1566. _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
  1567. _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);
  1568. pl330 = thrd->dmac;
  1569. spin_lock_irqsave(&pl330->lock, flags);
  1570. _free_event(thrd, thrd->ev);
  1571. thrd->free = true;
  1572. spin_unlock_irqrestore(&pl330->lock, flags);
  1573. }
1574. /* Initialize the structure holding the PL330 configuration, which can be
1575. * used by the client driver to make the best use of the DMAC.
1576. */
  1577. static void read_dmac_config(struct pl330_info *pi)
  1578. {
  1579. void __iomem *regs = pi->base;
  1580. u32 val;
  1581. val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
  1582. val &= CRD_DATA_WIDTH_MASK;
  1583. pi->pcfg.data_bus_width = 8 * (1 << val);
  1584. val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
  1585. val &= CRD_DATA_BUFF_MASK;
  1586. pi->pcfg.data_buf_dep = val + 1;
  1587. val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
  1588. val &= CR0_NUM_CHANS_MASK;
  1589. val += 1;
  1590. pi->pcfg.num_chan = val;
  1591. val = readl(regs + CR0);
  1592. if (val & CR0_PERIPH_REQ_SET) {
  1593. val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
  1594. val += 1;
  1595. pi->pcfg.num_peri = val;
  1596. pi->pcfg.peri_ns = readl(regs + CR4);
  1597. } else {
  1598. pi->pcfg.num_peri = 0;
  1599. }
  1600. val = readl(regs + CR0);
  1601. if (val & CR0_BOOT_MAN_NS)
  1602. pi->pcfg.mode |= DMAC_MODE_NS;
  1603. else
  1604. pi->pcfg.mode &= ~DMAC_MODE_NS;
  1605. val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
  1606. val &= CR0_NUM_EVENTS_MASK;
  1607. val += 1;
  1608. pi->pcfg.num_events = val;
  1609. pi->pcfg.irq_ns = readl(regs + CR3);
  1610. pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
  1611. pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
  1612. }
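/*
 * Point the thread's two request slots at the two halves of its per-channel
 * microcode buffer and mark both slots free.
 */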
  1613. static inline void _reset_thread(struct pl330_thread *thrd)
  1614. {
  1615. struct pl330_dmac *pl330 = thrd->dmac;
  1616. struct pl330_info *pi = pl330->pinfo;
  1617. thrd->req[0].mc_cpu = pl330->mcode_cpu
  1618. + (thrd->id * pi->mcbufsz);
  1619. thrd->req[0].mc_bus = pl330->mcode_bus
  1620. + (thrd->id * pi->mcbufsz);
  1621. thrd->req[0].r = NULL;
  1622. mark_free(thrd, 0);
  1623. thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
  1624. + pi->mcbufsz / 2;
  1625. thrd->req[1].mc_bus = thrd->req[0].mc_bus
  1626. + pi->mcbufsz / 2;
  1627. thrd->req[1].r = NULL;
  1628. mark_free(thrd, 1);
  1629. }
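/*
 * Allocate one pl330_thread per channel plus one for the manager (kept at the
 * end of the array), and reset each channel thread to a free state.
 */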
  1630. static int dmac_alloc_threads(struct pl330_dmac *pl330)
  1631. {
  1632. struct pl330_info *pi = pl330->pinfo;
  1633. int chans = pi->pcfg.num_chan;
  1634. struct pl330_thread *thrd;
  1635. int i;
  1636. /* Allocate 1 Manager and 'chans' Channel threads */
  1637. pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
  1638. GFP_KERNEL);
  1639. if (!pl330->channels)
  1640. return -ENOMEM;
  1641. /* Init Channel threads */
  1642. for (i = 0; i < chans; i++) {
  1643. thrd = &pl330->channels[i];
  1644. thrd->id = i;
  1645. thrd->dmac = pl330;
  1646. _reset_thread(thrd);
  1647. thrd->free = true;
  1648. }
  1649. /* MANAGER is indexed at the end */
  1650. thrd = &pl330->channels[chans];
  1651. thrd->id = chans;
  1652. thrd->dmac = pl330;
  1653. thrd->free = false;
  1654. pl330->manager = thrd;
  1655. return 0;
  1656. }
  1657. static int dmac_alloc_resources(struct pl330_dmac *pl330)
  1658. {
  1659. struct pl330_info *pi = pl330->pinfo;
  1660. int chans = pi->pcfg.num_chan;
  1661. int ret;
  1662. /*
  1663. * Alloc MicroCode buffer for 'chans' Channel threads.
  1664. * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
  1665. */
  1666. pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
  1667. chans * pi->mcbufsz,
  1668. &pl330->mcode_bus, GFP_KERNEL);
  1669. if (!pl330->mcode_cpu) {
  1670. dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
  1671. __func__, __LINE__);
  1672. return -ENOMEM;
  1673. }
  1674. ret = dmac_alloc_threads(pl330);
  1675. if (ret) {
  1676. dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n",
  1677. __func__, __LINE__);
  1678. dma_free_coherent(pi->dev,
  1679. chans * pi->mcbufsz,
  1680. pl330->mcode_cpu, pl330->mcode_bus);
  1681. return ret;
  1682. }
  1683. return 0;
  1684. }
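/*
 * Register a PL330 instance with this core layer: optionally reset the DMAC,
 * verify the PERIPH/PCELL IDs, read the hardware configuration, allocate the
 * microcode buffers and thread structures, and arm the housekeeping tasklet.
 */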
  1685. static int pl330_add(struct pl330_info *pi)
  1686. {
  1687. struct pl330_dmac *pl330;
  1688. void __iomem *regs;
  1689. int i, ret;
  1690. if (!pi || !pi->dev)
  1691. return -EINVAL;
  1692. /* If already added */
  1693. if (pi->pl330_data)
  1694. return -EINVAL;
  1695. /*
  1696. * If the SoC can perform reset on the DMAC, then do it
  1697. * before reading its configuration.
  1698. */
  1699. if (pi->dmac_reset)
  1700. pi->dmac_reset(pi);
  1701. regs = pi->base;
  1702. /* Check if we can handle this DMAC */
  1703. if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
  1704. || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
  1705. dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
  1706. get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
  1707. return -EINVAL;
  1708. }
  1709. /* Read the configuration of the DMAC */
  1710. read_dmac_config(pi);
  1711. if (pi->pcfg.num_events == 0) {
  1712. dev_err(pi->dev, "%s:%d Can't work without events!\n",
  1713. __func__, __LINE__);
  1714. return -EINVAL;
  1715. }
  1716. pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
  1717. if (!pl330) {
  1718. dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
  1719. __func__, __LINE__);
  1720. return -ENOMEM;
  1721. }
  1722. /* Assign the info structure and private data */
  1723. pl330->pinfo = pi;
  1724. pi->pl330_data = pl330;
  1725. spin_lock_init(&pl330->lock);
  1726. INIT_LIST_HEAD(&pl330->req_done);
  1727. /* Use default MC buffer size if not provided */
  1728. if (!pi->mcbufsz)
  1729. pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;
  1730. /* Mark all events as free */
  1731. for (i = 0; i < pi->pcfg.num_events; i++)
  1732. pl330->events[i] = -1;
  1733. /* Allocate resources needed by the DMAC */
  1734. ret = dmac_alloc_resources(pl330);
  1735. if (ret) {
  1736. dev_err(pi->dev, "Unable to create channels for DMAC\n");
  1737. kfree(pl330);
  1738. return ret;
  1739. }
  1740. tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
  1741. pl330->state = INIT;
  1742. return 0;
  1743. }
  1744. static int dmac_free_threads(struct pl330_dmac *pl330)
  1745. {
  1746. struct pl330_info *pi = pl330->pinfo;
  1747. int chans = pi->pcfg.num_chan;
  1748. struct pl330_thread *thrd;
  1749. int i;
  1750. /* Release Channel threads */
  1751. for (i = 0; i < chans; i++) {
  1752. thrd = &pl330->channels[i];
  1753. pl330_release_channel((void *)thrd);
  1754. }
  1755. /* Free memory */
  1756. kfree(pl330->channels);
  1757. return 0;
  1758. }
  1759. static void dmac_free_resources(struct pl330_dmac *pl330)
  1760. {
  1761. struct pl330_info *pi = pl330->pinfo;
  1762. int chans = pi->pcfg.num_chan;
  1763. dmac_free_threads(pl330);
  1764. dma_free_coherent(pi->dev, chans * pi->mcbufsz,
  1765. pl330->mcode_cpu, pl330->mcode_bus);
  1766. }
  1767. static void pl330_del(struct pl330_info *pi)
  1768. {
  1769. struct pl330_dmac *pl330;
  1770. if (!pi || !pi->pl330_data)
  1771. return;
  1772. pl330 = pi->pl330_data;
  1773. pl330->state = UNINIT;
  1774. tasklet_kill(&pl330->tasks);
  1775. /* Free DMAC resources */
  1776. dmac_free_resources(pl330);
  1777. kfree(pl330);
  1778. pi->pl330_data = NULL;
  1779. }
  1780. /* forward declaration */
  1781. static struct amba_driver pl330_driver;
  1782. static inline struct dma_pl330_chan *
  1783. to_pchan(struct dma_chan *ch)
  1784. {
  1785. if (!ch)
  1786. return NULL;
  1787. return container_of(ch, struct dma_pl330_chan, chan);
  1788. }
  1789. static inline struct dma_pl330_desc *
  1790. to_desc(struct dma_async_tx_descriptor *tx)
  1791. {
  1792. return container_of(tx, struct dma_pl330_desc, txd);
  1793. }
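/*
 * Run the client callback for every completed descriptor on 'list' and then
 * return all of them to the owning DMAC's descriptor pool.
 */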
  1794. static inline void free_desc_list(struct list_head *list)
  1795. {
  1796. struct dma_pl330_dmac *pdmac;
  1797. struct dma_pl330_desc *desc;
  1798. struct dma_pl330_chan *pch;
  1799. unsigned long flags;
  1800. if (list_empty(list))
  1801. return;
  1802. /* Finish off the work list */
  1803. list_for_each_entry(desc, list, node) {
  1804. dma_async_tx_callback callback;
  1805. void *param;
  1806. /* All desc in a list belong to same channel */
  1807. pch = desc->pchan;
  1808. callback = desc->txd.callback;
  1809. param = desc->txd.callback_param;
  1810. if (callback)
  1811. callback(param);
  1812. desc->pchan = NULL;
  1813. }
  1814. pdmac = pch->dmac;
  1815. spin_lock_irqsave(&pdmac->pool_lock, flags);
  1816. list_splice_tail_init(list, &pdmac->desc_pool);
  1817. spin_unlock_irqrestore(&pdmac->pool_lock, flags);
  1818. }
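/*
 * Cyclic variant of the above: invoke the period callback for each
 * descriptor, mark it PREP again and splice the whole list back onto the
 * channel's work list so it gets resubmitted instead of freed.
 */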
  1819. static inline void handle_cyclic_desc_list(struct list_head *list)
  1820. {
  1821. struct dma_pl330_desc *desc;
  1822. struct dma_pl330_chan *pch;
  1823. unsigned long flags;
  1824. if (list_empty(list))
  1825. return;
  1826. list_for_each_entry(desc, list, node) {
  1827. dma_async_tx_callback callback;
  1828. /* Change status to reload it */
  1829. desc->status = PREP;
  1830. pch = desc->pchan;
  1831. callback = desc->txd.callback;
  1832. if (callback)
  1833. callback(desc->txd.callback_param);
  1834. }
  1835. spin_lock_irqsave(&pch->lock, flags);
  1836. list_splice_tail_init(list, &pch->work_list);
  1837. spin_unlock_irqrestore(&pch->lock, flags);
  1838. }
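/*
 * Try to hand the next prepared descriptor on the work list to the core
 * layer. Stop at the first descriptor that is already BUSY or when the
 * core's queue is full (-EAGAIN); a descriptor the core rejects outright is
 * marked DONE so the tasklet can retire it.
 */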
  1839. static inline void fill_queue(struct dma_pl330_chan *pch)
  1840. {
  1841. struct dma_pl330_desc *desc;
  1842. int ret;
  1843. list_for_each_entry(desc, &pch->work_list, node) {
  1844. /* If already submitted */
  1845. if (desc->status == BUSY)
  1846. break;
  1847. ret = pl330_submit_req(pch->pl330_chid,
  1848. &desc->req);
  1849. if (!ret) {
  1850. desc->status = BUSY;
  1851. break;
  1852. } else if (ret == -EAGAIN) {
  1853. /* QFull or DMAC Dying */
  1854. break;
  1855. } else {
  1856. /* Unacceptable request */
  1857. desc->status = DONE;
  1858. dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
  1859. __func__, __LINE__, desc->txd.cookie);
  1860. tasklet_schedule(&pch->task);
  1861. }
  1862. }
  1863. }
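/*
 * Per-channel tasklet: collect DONE descriptors, try to queue more work,
 * make sure the channel thread is running, and finally either recycle the
 * completed descriptors (cyclic) or run their callbacks and free them.
 */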
  1864. static void pl330_tasklet(unsigned long data)
  1865. {
  1866. struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
  1867. struct dma_pl330_desc *desc, *_dt;
  1868. unsigned long flags;
  1869. LIST_HEAD(list);
  1870. spin_lock_irqsave(&pch->lock, flags);
  1871. /* Pick up ripe tomatoes */
  1872. list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
  1873. if (desc->status == DONE) {
  1874. dma_cookie_complete(&desc->txd);
  1875. list_move_tail(&desc->node, &list);
  1876. }
  1877. /* Try to submit a req imm. next to the last completed cookie */
  1878. fill_queue(pch);
  1879. /* Make sure the PL330 Channel thread is active */
  1880. pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
  1881. spin_unlock_irqrestore(&pch->lock, flags);
  1882. if (pch->cyclic)
  1883. handle_cyclic_desc_list(&list);
  1884. else
  1885. free_desc_list(&list);
  1886. }
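/*
 * Transfer-done callback invoked by the core layer: mark the descriptor DONE
 * and schedule the channel tasklet (unless the descriptor was already
 * detached by an abort).
 */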
  1887. static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
  1888. {
  1889. struct dma_pl330_desc *desc = token;
  1890. struct dma_pl330_chan *pch = desc->pchan;
  1891. unsigned long flags;
  1892. /* If desc aborted */
  1893. if (!pch)
  1894. return;
  1895. spin_lock_irqsave(&pch->lock, flags);
  1896. desc->status = DONE;
  1897. spin_unlock_irqrestore(&pch->lock, flags);
  1898. tasklet_schedule(&pch->task);
  1899. }
  1900. bool pl330_filter(struct dma_chan *chan, void *param)
  1901. {
  1902. u8 *peri_id;
  1903. if (chan->device->dev->driver != &pl330_driver.drv)
  1904. return false;
  1905. #ifdef CONFIG_OF
  1906. if (chan->device->dev->of_node) {
  1907. const __be32 *prop_value;
  1908. phandle phandle;
  1909. struct device_node *node;
  1910. prop_value = ((struct property *)param)->value;
  1911. phandle = be32_to_cpup(prop_value++);
  1912. node = of_find_node_by_phandle(phandle);
  1913. return ((chan->private == node) &&
  1914. (chan->chan_id == be32_to_cpup(prop_value)));
  1915. }
  1916. #endif
  1917. peri_id = chan->private;
  1918. return *peri_id == (unsigned)param;
  1919. }
  1920. EXPORT_SYMBOL(pl330_filter);
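/*
 * Illustrative usage (not part of this driver): a client typically passes
 * pl330_filter to dma_request_channel() together with the peripheral request
 * line it wants, e.g. with a platform-assigned 'peri_id':
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);
 */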
  1921. static int pl330_alloc_chan_resources(struct dma_chan *chan)
  1922. {
  1923. struct dma_pl330_chan *pch = to_pchan(chan);
  1924. struct dma_pl330_dmac *pdmac = pch->dmac;
  1925. unsigned long flags;
  1926. spin_lock_irqsave(&pch->lock, flags);
  1927. dma_cookie_init(chan);
  1928. pch->cyclic = false;
  1929. pch->pl330_chid = pl330_request_channel(&pdmac->pif);
  1930. if (!pch->pl330_chid) {
  1931. spin_unlock_irqrestore(&pch->lock, flags);
  1932. return 0;
  1933. }
  1934. tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
  1935. spin_unlock_irqrestore(&pch->lock, flags);
  1936. return 1;
  1937. }
  1938. static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
  1939. {
  1940. struct dma_pl330_chan *pch = to_pchan(chan);
  1941. struct dma_pl330_desc *desc, *_dt;
  1942. unsigned long flags;
  1943. struct dma_pl330_dmac *pdmac = pch->dmac;
  1944. struct dma_slave_config *slave_config;
  1945. LIST_HEAD(list);
  1946. switch (cmd) {
  1947. case DMA_TERMINATE_ALL:
  1948. spin_lock_irqsave(&pch->lock, flags);
  1949. /* FLUSH the PL330 Channel thread */
  1950. pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
  1951. /* Mark all desc done */
  1952. list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
  1953. desc->status = DONE;
  1954. list_move_tail(&desc->node, &list);
  1955. }
  1956. list_splice_tail_init(&list, &pdmac->desc_pool);
  1957. spin_unlock_irqrestore(&pch->lock, flags);
  1958. break;
  1959. case DMA_SLAVE_CONFIG:
  1960. slave_config = (struct dma_slave_config *)arg;
  1961. if (slave_config->direction == DMA_MEM_TO_DEV) {
  1962. if (slave_config->dst_addr)
  1963. pch->fifo_addr = slave_config->dst_addr;
  1964. if (slave_config->dst_addr_width)
  1965. pch->burst_sz = __ffs(slave_config->dst_addr_width);
  1966. if (slave_config->dst_maxburst)
  1967. pch->burst_len = slave_config->dst_maxburst;
  1968. } else if (slave_config->direction == DMA_DEV_TO_MEM) {
  1969. if (slave_config->src_addr)
  1970. pch->fifo_addr = slave_config->src_addr;
  1971. if (slave_config->src_addr_width)
  1972. pch->burst_sz = __ffs(slave_config->src_addr_width);
  1973. if (slave_config->src_maxburst)
  1974. pch->burst_len = slave_config->src_maxburst;
  1975. }
  1976. break;
  1977. default:
  1978. dev_err(pch->dmac->pif.dev, "Not supported command.\n");
  1979. return -ENXIO;
  1980. }
  1981. return 0;
  1982. }
  1983. static void pl330_free_chan_resources(struct dma_chan *chan)
  1984. {
  1985. struct dma_pl330_chan *pch = to_pchan(chan);
  1986. unsigned long flags;
1987. tasklet_kill(&pch->task); /* outside the lock: the tasklet takes pch->lock itself */
1988. spin_lock_irqsave(&pch->lock, flags);
  1989. pl330_release_channel(pch->pl330_chid);
  1990. pch->pl330_chid = NULL;
  1991. if (pch->cyclic)
  1992. list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
  1993. spin_unlock_irqrestore(&pch->lock, flags);
  1994. }
  1995. static enum dma_status
  1996. pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
  1997. struct dma_tx_state *txstate)
  1998. {
  1999. return dma_cookie_status(chan, cookie, txstate);
  2000. }
  2001. static void pl330_issue_pending(struct dma_chan *chan)
  2002. {
  2003. pl330_tasklet((unsigned long) to_pchan(chan));
  2004. }
  2005. /*
  2006. * We returned the last one of the circular list of descriptor(s)
  2007. * from prep_xxx, so the argument to submit corresponds to the last
  2008. * descriptor of the list.
  2009. */
  2010. static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
  2011. {
  2012. struct dma_pl330_desc *desc, *last = to_desc(tx);
  2013. struct dma_pl330_chan *pch = to_pchan(tx->chan);
  2014. dma_cookie_t cookie;
  2015. unsigned long flags;
  2016. spin_lock_irqsave(&pch->lock, flags);
  2017. /* Assign cookies to all nodes */
  2018. while (!list_empty(&last->node)) {
  2019. desc = list_entry(last->node.next, struct dma_pl330_desc, node);
  2020. dma_cookie_assign(&desc->txd);
  2021. list_move_tail(&desc->node, &pch->work_list);
  2022. }
  2023. cookie = dma_cookie_assign(&last->txd);
  2024. list_add_tail(&last->node, &pch->work_list);
  2025. spin_unlock_irqrestore(&pch->lock, flags);
  2026. return cookie;
  2027. }
  2028. static inline void _init_desc(struct dma_pl330_desc *desc)
  2029. {
  2030. desc->pchan = NULL;
  2031. desc->req.x = &desc->px;
  2032. desc->req.token = desc;
  2033. desc->rqcfg.swap = SWAP_NO;
  2034. desc->rqcfg.privileged = 0;
  2035. desc->rqcfg.insnaccess = 0;
  2036. desc->rqcfg.scctl = SCCTRL0;
  2037. desc->rqcfg.dcctl = DCCTRL0;
  2038. desc->req.cfg = &desc->rqcfg;
  2039. desc->req.xfer_cb = dma_pl330_rqcb;
  2040. desc->txd.tx_submit = pl330_tx_submit;
  2041. INIT_LIST_HEAD(&desc->node);
  2042. }
  2043. /* Returns the number of descriptors added to the DMAC pool */
2044. static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
  2045. {
  2046. struct dma_pl330_desc *desc;
  2047. unsigned long flags;
  2048. int i;
  2049. if (!pdmac)
  2050. return 0;
  2051. desc = kmalloc(count * sizeof(*desc), flg);
  2052. if (!desc)
  2053. return 0;
  2054. spin_lock_irqsave(&pdmac->pool_lock, flags);
  2055. for (i = 0; i < count; i++) {
  2056. _init_desc(&desc[i]);
  2057. list_add_tail(&desc[i].node, &pdmac->desc_pool);
  2058. }
  2059. spin_unlock_irqrestore(&pdmac->pool_lock, flags);
  2060. return count;
  2061. }
  2062. static struct dma_pl330_desc *
  2063. pluck_desc(struct dma_pl330_dmac *pdmac)
  2064. {
  2065. struct dma_pl330_desc *desc = NULL;
  2066. unsigned long flags;
  2067. if (!pdmac)
  2068. return NULL;
  2069. spin_lock_irqsave(&pdmac->pool_lock, flags);
  2070. if (!list_empty(&pdmac->desc_pool)) {
  2071. desc = list_entry(pdmac->desc_pool.next,
  2072. struct dma_pl330_desc, node);
  2073. list_del_init(&desc->node);
  2074. desc->status = PREP;
  2075. desc->txd.callback = NULL;
  2076. }
  2077. spin_unlock_irqrestore(&pdmac->pool_lock, flags);
  2078. return desc;
  2079. }
  2080. static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
  2081. {
  2082. struct dma_pl330_dmac *pdmac = pch->dmac;
  2083. u8 *peri_id = pch->chan.private;
  2084. struct dma_pl330_desc *desc;
  2085. /* Pluck one desc from the pool of DMAC */
  2086. desc = pluck_desc(pdmac);
  2087. /* If the DMAC pool is empty, alloc new */
  2088. if (!desc) {
  2089. if (!add_desc(pdmac, GFP_ATOMIC, 1))
  2090. return NULL;
  2091. /* Try again */
  2092. desc = pluck_desc(pdmac);
  2093. if (!desc) {
  2094. dev_err(pch->dmac->pif.dev,
  2095. "%s:%d ALERT!\n", __func__, __LINE__);
  2096. return NULL;
  2097. }
  2098. }
  2099. /* Initialize the descriptor */
  2100. desc->pchan = pch;
  2101. desc->txd.cookie = 0;
  2102. async_tx_ack(&desc->txd);
  2103. desc->req.peri = peri_id ? pch->chan.chan_id : 0;
  2104. desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;
  2105. dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
  2106. return desc;
  2107. }
  2108. static inline void fill_px(struct pl330_xfer *px,
  2109. dma_addr_t dst, dma_addr_t src, size_t len)
  2110. {
  2111. px->next = NULL;
  2112. px->bytes = len;
  2113. px->dst_addr = dst;
  2114. px->src_addr = src;
  2115. }
  2116. static struct dma_pl330_desc *
  2117. __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
  2118. dma_addr_t src, size_t len)
  2119. {
  2120. struct dma_pl330_desc *desc = pl330_get_desc(pch);
  2121. if (!desc) {
  2122. dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
  2123. __func__, __LINE__);
  2124. return NULL;
  2125. }
2126. /*
2127. * Ideally we should look out for reqs bigger than
2128. * those that can be programmed with 256 bytes of
2129. * MC buffer, but considering a req size is seldom
2130. * going to be word-unaligned and more than 200MB,
2131. * we take it easy.
2132. * Also, should the limit be reached we'd rather
2133. * have the platform increase the MC buffer size than
2134. * complicate this API driver.
2135. */
  2136. fill_px(&desc->px, dst, src, len);
  2137. return desc;
  2138. }
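/*
 * Pick the largest burst length (<= 16) that makes use of the data buffer
 * and still divides 'len' evenly at the already-chosen burst size. For
 * example (numbers only illustrative): with a 64-bit bus, a 16-deep data
 * buffer and brst_size = 2 (4-byte beats), the cap of 16 applies and the
 * loop then trims the burst length until 16 * 4, 15 * 4, ... divides len.
 */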
  2139. /* Call after fixing burst size */
  2140. static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
  2141. {
  2142. struct dma_pl330_chan *pch = desc->pchan;
  2143. struct pl330_info *pi = &pch->dmac->pif;
  2144. int burst_len;
  2145. burst_len = pi->pcfg.data_bus_width / 8;
  2146. burst_len *= pi->pcfg.data_buf_dep;
  2147. burst_len >>= desc->rqcfg.brst_size;
  2148. /* src/dst_burst_len can't be more than 16 */
  2149. if (burst_len > 16)
  2150. burst_len = 16;
  2151. while (burst_len > 1) {
  2152. if (!(len % (burst_len << desc->rqcfg.brst_size)))
  2153. break;
  2154. burst_len--;
  2155. }
  2156. return burst_len;
  2157. }
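/*
 * Prepare a cyclic (e.g. audio) transfer: build a single-period descriptor
 * between memory and the channel's FIFO address and flag the channel as
 * cyclic so completed periods are re-queued by the tasklet rather than
 * freed.
 */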
  2158. static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
  2159. struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
  2160. size_t period_len, enum dma_transfer_direction direction,
  2161. void *context)
  2162. {
  2163. struct dma_pl330_desc *desc;
  2164. struct dma_pl330_chan *pch = to_pchan(chan);
  2165. dma_addr_t dst;
  2166. dma_addr_t src;
  2167. desc = pl330_get_desc(pch);
  2168. if (!desc) {
  2169. dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
  2170. __func__, __LINE__);
  2171. return NULL;
  2172. }
  2173. switch (direction) {
  2174. case DMA_MEM_TO_DEV:
  2175. desc->rqcfg.src_inc = 1;
  2176. desc->rqcfg.dst_inc = 0;
  2177. desc->req.rqtype = MEMTODEV;
  2178. src = dma_addr;
  2179. dst = pch->fifo_addr;
  2180. break;
  2181. case DMA_DEV_TO_MEM:
  2182. desc->rqcfg.src_inc = 0;
  2183. desc->rqcfg.dst_inc = 1;
  2184. desc->req.rqtype = DEVTOMEM;
  2185. src = pch->fifo_addr;
  2186. dst = dma_addr;
  2187. break;
  2188. default:
  2189. dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
  2190. __func__, __LINE__);
  2191. return NULL;
  2192. }
  2193. desc->rqcfg.brst_size = pch->burst_sz;
  2194. desc->rqcfg.brst_len = 1;
  2195. pch->cyclic = true;
  2196. fill_px(&desc->px, dst, src, period_len);
  2197. return &desc->txd;
  2198. }
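/*
 * Prepare a memory-to-memory copy: pick the widest burst size that divides
 * the length evenly, then let get_burst_len() choose the burst length.
 */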
  2199. static struct dma_async_tx_descriptor *
  2200. pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
  2201. dma_addr_t src, size_t len, unsigned long flags)
  2202. {
  2203. struct dma_pl330_desc *desc;
  2204. struct dma_pl330_chan *pch = to_pchan(chan);
  2205. struct pl330_info *pi;
  2206. int burst;
  2207. if (unlikely(!pch || !len))
  2208. return NULL;
  2209. pi = &pch->dmac->pif;
  2210. desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
  2211. if (!desc)
  2212. return NULL;
  2213. desc->rqcfg.src_inc = 1;
  2214. desc->rqcfg.dst_inc = 1;
  2215. desc->req.rqtype = MEMTOMEM;
  2216. /* Select max possible burst size */
  2217. burst = pi->pcfg.data_bus_width / 8;
  2218. while (burst > 1) {
  2219. if (!(len % burst))
  2220. break;
  2221. burst /= 2;
  2222. }
  2223. desc->rqcfg.brst_size = 0;
  2224. while (burst != (1 << desc->rqcfg.brst_size))
  2225. desc->rqcfg.brst_size++;
  2226. desc->rqcfg.brst_len = get_burst_len(desc, len);
  2227. desc->txd.flags = flags;
  2228. return &desc->txd;
  2229. }
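/*
 * Prepare a slave transfer from a scatterlist: one descriptor per sg entry,
 * chained off the first, each moving data between the sg segment and the
 * channel's FIFO address; on allocation failure the partial chain is
 * returned to the descriptor pool.
 */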
  2230. static struct dma_async_tx_descriptor *
  2231. pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
  2232. unsigned int sg_len, enum dma_transfer_direction direction,
  2233. unsigned long flg, void *context)
  2234. {
  2235. struct dma_pl330_desc *first, *desc = NULL;
  2236. struct dma_pl330_chan *pch = to_pchan(chan);
  2237. struct scatterlist *sg;
  2238. unsigned long flags;
  2239. int i;
  2240. dma_addr_t addr;
  2241. if (unlikely(!pch || !sgl || !sg_len))
  2242. return NULL;
  2243. addr = pch->fifo_addr;
  2244. first = NULL;
  2245. for_each_sg(sgl, sg, sg_len, i) {
  2246. desc = pl330_get_desc(pch);
  2247. if (!desc) {
  2248. struct dma_pl330_dmac *pdmac = pch->dmac;
  2249. dev_err(pch->dmac->pif.dev,
  2250. "%s:%d Unable to fetch desc\n",
  2251. __func__, __LINE__);
  2252. if (!first)
  2253. return NULL;
  2254. spin_lock_irqsave(&pdmac->pool_lock, flags);
  2255. while (!list_empty(&first->node)) {
  2256. desc = list_entry(first->node.next,
  2257. struct dma_pl330_desc, node);
  2258. list_move_tail(&desc->node, &pdmac->desc_pool);
  2259. }
  2260. list_move_tail(&first->node, &pdmac->desc_pool);
  2261. spin_unlock_irqrestore(&pdmac->pool_lock, flags);
  2262. return NULL;
  2263. }
  2264. if (!first)
  2265. first = desc;
  2266. else
  2267. list_add_tail(&desc->node, &first->node);
  2268. if (direction == DMA_MEM_TO_DEV) {
  2269. desc->rqcfg.src_inc = 1;
  2270. desc->rqcfg.dst_inc = 0;
  2271. desc->req.rqtype = MEMTODEV;
  2272. fill_px(&desc->px,
  2273. addr, sg_dma_address(sg), sg_dma_len(sg));
  2274. } else {
  2275. desc->rqcfg.src_inc = 0;
  2276. desc->rqcfg.dst_inc = 1;
  2277. desc->req.rqtype = DEVTOMEM;
  2278. fill_px(&desc->px,
  2279. sg_dma_address(sg), addr, sg_dma_len(sg));
  2280. }
  2281. desc->rqcfg.brst_size = pch->burst_sz;
  2282. desc->rqcfg.brst_len = 1;
  2283. }
  2284. /* Return the last desc in the chain */
  2285. desc->txd.flags = flg;
  2286. return &desc->txd;
  2287. }
  2288. static irqreturn_t pl330_irq_handler(int irq, void *data)
  2289. {
  2290. if (pl330_update(data))
  2291. return IRQ_HANDLED;
  2292. else
  2293. return IRQ_NONE;
  2294. }
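/*
 * AMBA probe: map the registers, grab the clock and IRQ, register the
 * controller with the core layer (pl330_add), build the descriptor pool and
 * the dmaengine channels, and finally register the dma_device.
 */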
  2295. static int __devinit
  2296. pl330_probe(struct amba_device *adev, const struct amba_id *id)
  2297. {
  2298. struct dma_pl330_platdata *pdat;
  2299. struct dma_pl330_dmac *pdmac;
  2300. struct dma_pl330_chan *pch;
  2301. struct pl330_info *pi;
  2302. struct dma_device *pd;
  2303. struct resource *res;
  2304. int i, ret, irq;
  2305. int num_chan;
  2306. pdat = adev->dev.platform_data;
  2307. /* Allocate a new DMAC and its Channels */
  2308. pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
  2309. if (!pdmac) {
  2310. dev_err(&adev->dev, "unable to allocate mem\n");
  2311. return -ENOMEM;
  2312. }
  2313. pi = &pdmac->pif;
  2314. pi->dev = &adev->dev;
  2315. pi->pl330_data = NULL;
  2316. pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
  2317. res = &adev->res;
  2318. request_mem_region(res->start, resource_size(res), "dma-pl330");
  2319. pi->base = ioremap(res->start, resource_size(res));
  2320. if (!pi->base) {
  2321. ret = -ENXIO;
  2322. goto probe_err1;
  2323. }
  2324. pdmac->clk = clk_get(&adev->dev, "dma");
  2325. if (IS_ERR(pdmac->clk)) {
  2326. dev_err(&adev->dev, "Cannot get operation clock.\n");
  2327. ret = -EINVAL;
  2328. goto probe_err2;
  2329. }
  2330. amba_set_drvdata(adev, pdmac);
  2331. #ifndef CONFIG_PM_RUNTIME
  2332. /* enable dma clk */
  2333. clk_enable(pdmac->clk);
  2334. #endif
  2335. irq = adev->irq[0];
  2336. ret = request_irq(irq, pl330_irq_handler, 0,
  2337. dev_name(&adev->dev), pi);
  2338. if (ret)
  2339. goto probe_err3;
  2340. ret = pl330_add(pi);
  2341. if (ret)
  2342. goto probe_err4;
  2343. INIT_LIST_HEAD(&pdmac->desc_pool);
  2344. spin_lock_init(&pdmac->pool_lock);
  2345. /* Create a descriptor pool of default size */
  2346. if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
  2347. dev_warn(&adev->dev, "unable to allocate desc\n");
  2348. pd = &pdmac->ddma;
  2349. INIT_LIST_HEAD(&pd->channels);
  2350. /* Initialize channel parameters */
  2351. num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
  2352. (u8)pi->pcfg.num_chan);
2353. pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
if (!pdmac->peripherals) {
ret = -ENOMEM;
dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
goto probe_err5;
}
  2354. for (i = 0; i < num_chan; i++) {
  2355. pch = &pdmac->peripherals[i];
  2356. if (!adev->dev.of_node)
  2357. pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
  2358. else
  2359. pch->chan.private = adev->dev.of_node;
  2360. INIT_LIST_HEAD(&pch->work_list);
  2361. spin_lock_init(&pch->lock);
  2362. pch->pl330_chid = NULL;
  2363. pch->chan.device = pd;
  2364. pch->dmac = pdmac;
  2365. /* Add the channel to the DMAC list */
  2366. list_add_tail(&pch->chan.device_node, &pd->channels);
  2367. }
  2368. pd->dev = &adev->dev;
  2369. if (pdat) {
  2370. pd->cap_mask = pdat->cap_mask;
  2371. } else {
  2372. dma_cap_set(DMA_MEMCPY, pd->cap_mask);
  2373. if (pi->pcfg.num_peri) {
  2374. dma_cap_set(DMA_SLAVE, pd->cap_mask);
  2375. dma_cap_set(DMA_CYCLIC, pd->cap_mask);
  2376. }
  2377. }
  2378. pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
  2379. pd->device_free_chan_resources = pl330_free_chan_resources;
  2380. pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
  2381. pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
  2382. pd->device_tx_status = pl330_tx_status;
  2383. pd->device_prep_slave_sg = pl330_prep_slave_sg;
  2384. pd->device_control = pl330_control;
  2385. pd->device_issue_pending = pl330_issue_pending;
  2386. ret = dma_async_device_register(pd);
  2387. if (ret) {
  2388. dev_err(&adev->dev, "unable to register DMAC\n");
  2389. goto probe_err5;
  2390. }
  2391. dev_info(&adev->dev,
  2392. "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
  2393. dev_info(&adev->dev,
  2394. "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
  2395. pi->pcfg.data_buf_dep,
  2396. pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
  2397. pi->pcfg.num_peri, pi->pcfg.num_events);
  2398. return 0;
  2399. probe_err5:
  2400. pl330_del(pi);
  2401. probe_err4:
  2402. free_irq(irq, pi);
  2403. probe_err3:
  2404. #ifndef CONFIG_PM_RUNTIME
  2405. clk_disable(pdmac->clk);
  2406. #endif
  2407. clk_put(pdmac->clk);
  2408. probe_err2:
  2409. iounmap(pi->base);
  2410. probe_err1:
  2411. release_mem_region(res->start, resource_size(res));
  2412. kfree(pdmac);
  2413. return ret;
  2414. }
  2415. static int __devexit pl330_remove(struct amba_device *adev)
  2416. {
  2417. struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
  2418. struct dma_pl330_chan *pch, *_p;
  2419. struct pl330_info *pi;
  2420. struct resource *res;
  2421. int irq;
  2422. if (!pdmac)
  2423. return 0;
  2424. amba_set_drvdata(adev, NULL);
  2425. /* Idle the DMAC */
  2426. list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
  2427. chan.device_node) {
  2428. /* Remove the channel */
  2429. list_del(&pch->chan.device_node);
  2430. /* Flush the channel */
  2431. pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
  2432. pl330_free_chan_resources(&pch->chan);
  2433. }
  2434. pi = &pdmac->pif;
  2435. pl330_del(pi);
  2436. irq = adev->irq[0];
  2437. free_irq(irq, pi);
  2438. iounmap(pi->base);
  2439. res = &adev->res;
  2440. release_mem_region(res->start, resource_size(res));
  2441. #ifndef CONFIG_PM_RUNTIME
  2442. clk_disable(pdmac->clk);
  2443. #endif
  2444. kfree(pdmac);
  2445. return 0;
  2446. }
  2447. static struct amba_id pl330_ids[] = {
  2448. {
  2449. .id = 0x00041330,
  2450. .mask = 0x000fffff,
  2451. },
  2452. { 0, 0 },
  2453. };
  2454. MODULE_DEVICE_TABLE(amba, pl330_ids);
  2455. #ifdef CONFIG_PM_RUNTIME
  2456. static int pl330_runtime_suspend(struct device *dev)
  2457. {
  2458. struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
  2459. if (!pdmac) {
  2460. dev_err(dev, "failed to get dmac\n");
  2461. return -ENODEV;
  2462. }
  2463. clk_disable(pdmac->clk);
  2464. return 0;
  2465. }
  2466. static int pl330_runtime_resume(struct device *dev)
  2467. {
  2468. struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
  2469. if (!pdmac) {
  2470. dev_err(dev, "failed to get dmac\n");
  2471. return -ENODEV;
  2472. }
  2473. clk_enable(pdmac->clk);
  2474. return 0;
  2475. }
  2476. #else
  2477. #define pl330_runtime_suspend NULL
  2478. #define pl330_runtime_resume NULL
  2479. #endif /* CONFIG_PM_RUNTIME */
  2480. static const struct dev_pm_ops pl330_pm_ops = {
  2481. .runtime_suspend = pl330_runtime_suspend,
  2482. .runtime_resume = pl330_runtime_resume,
  2483. };
  2484. static struct amba_driver pl330_driver = {
  2485. .drv = {
  2486. .owner = THIS_MODULE,
  2487. .name = "dma-pl330",
  2488. .pm = &pl330_pm_ops,
  2489. },
  2490. .id_table = pl330_ids,
  2491. .probe = pl330_probe,
  2492. .remove = pl330_remove,
  2493. };
  2494. module_amba_driver(pl330_driver);
  2495. MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
  2496. MODULE_DESCRIPTION("API Driver for PL330 DMAC");
  2497. MODULE_LICENSE("GPL");