amba-pl022.c 52 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
66116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871
  1. /*
  2. * drivers/spi/amba-pl022.c
  3. *
  4. * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
  5. *
  6. * Copyright (C) 2008-2009 ST-Ericsson AB
  7. * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
  8. *
  9. * Author: Linus Walleij <linus.walleij@stericsson.com>
  10. *
  11. * Initial version inspired by:
  12. * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
  13. * Initial adoption to PL022 by:
  14. * Sachin Verma <sachin.verma@st.com>
  15. *
  16. * This program is free software; you can redistribute it and/or modify
  17. * it under the terms of the GNU General Public License as published by
  18. * the Free Software Foundation; either version 2 of the License, or
  19. * (at your option) any later version.
  20. *
  21. * This program is distributed in the hope that it will be useful,
  22. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  23. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  24. * GNU General Public License for more details.
  25. */
  26. /*
  27. * TODO:
  28. * - add timeout on polled transfers
  29. * - add generic DMA framework support
  30. */
  31. #include <linux/init.h>
  32. #include <linux/module.h>
  33. #include <linux/device.h>
  34. #include <linux/ioport.h>
  35. #include <linux/errno.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/spi/spi.h>
  38. #include <linux/workqueue.h>
  39. #include <linux/delay.h>
  40. #include <linux/clk.h>
  41. #include <linux/err.h>
  42. #include <linux/amba/bus.h>
  43. #include <linux/amba/pl022.h>
  44. #include <linux/io.h>
  45. #include <linux/slab.h>
  46. /*
  47. * This macro is used to define some register default values.
  48. * reg is masked with mask, the OR:ed with an (again masked)
  49. * val shifted sb steps to the left.
  50. */
  51. #define SSP_WRITE_BITS(reg, val, mask, sb) \
  52. ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
  53. /*
  54. * This macro is also used to define some default values.
  55. * It will just shift val by sb steps to the left and mask
  56. * the result with mask.
  57. */
  58. #define GEN_MASK_BITS(val, mask, sb) \
  59. (((val)<<(sb)) & (mask))
  60. #define DRIVE_TX 0
  61. #define DO_NOT_DRIVE_TX 1
  62. #define DO_NOT_QUEUE_DMA 0
  63. #define QUEUE_DMA 1
  64. #define RX_TRANSFER 1
  65. #define TX_TRANSFER 2
  66. /*
  67. * Macros to access SSP Registers with their offsets
  68. */
  69. #define SSP_CR0(r) (r + 0x000)
  70. #define SSP_CR1(r) (r + 0x004)
  71. #define SSP_DR(r) (r + 0x008)
  72. #define SSP_SR(r) (r + 0x00C)
  73. #define SSP_CPSR(r) (r + 0x010)
  74. #define SSP_IMSC(r) (r + 0x014)
  75. #define SSP_RIS(r) (r + 0x018)
  76. #define SSP_MIS(r) (r + 0x01C)
  77. #define SSP_ICR(r) (r + 0x020)
  78. #define SSP_DMACR(r) (r + 0x024)
  79. #define SSP_ITCR(r) (r + 0x080)
  80. #define SSP_ITIP(r) (r + 0x084)
  81. #define SSP_ITOP(r) (r + 0x088)
  82. #define SSP_TDR(r) (r + 0x08C)
  83. #define SSP_PID0(r) (r + 0xFE0)
  84. #define SSP_PID1(r) (r + 0xFE4)
  85. #define SSP_PID2(r) (r + 0xFE8)
  86. #define SSP_PID3(r) (r + 0xFEC)
  87. #define SSP_CID0(r) (r + 0xFF0)
  88. #define SSP_CID1(r) (r + 0xFF4)
  89. #define SSP_CID2(r) (r + 0xFF8)
  90. #define SSP_CID3(r) (r + 0xFFC)
  91. /*
  92. * SSP Control Register 0 - SSP_CR0
  93. */
  94. #define SSP_CR0_MASK_DSS (0x1FUL << 0)
  95. #define SSP_CR0_MASK_HALFDUP (0x1UL << 5)
  96. #define SSP_CR0_MASK_SPO (0x1UL << 6)
  97. #define SSP_CR0_MASK_SPH (0x1UL << 7)
  98. #define SSP_CR0_MASK_SCR (0xFFUL << 8)
  99. #define SSP_CR0_MASK_CSS (0x1FUL << 16)
  100. #define SSP_CR0_MASK_FRF (0x3UL << 21)
  101. /*
  102. * SSP Control Register 0 - SSP_CR1
  103. */
  104. #define SSP_CR1_MASK_LBM (0x1UL << 0)
  105. #define SSP_CR1_MASK_SSE (0x1UL << 1)
  106. #define SSP_CR1_MASK_MS (0x1UL << 2)
  107. #define SSP_CR1_MASK_SOD (0x1UL << 3)
  108. #define SSP_CR1_MASK_RENDN (0x1UL << 4)
  109. #define SSP_CR1_MASK_TENDN (0x1UL << 5)
  110. #define SSP_CR1_MASK_MWAIT (0x1UL << 6)
  111. #define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7)
  112. #define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10)
  113. /*
  114. * SSP Data Register - SSP_DR
  115. */
  116. #define SSP_DR_MASK_DATA 0xFFFFFFFF
  117. /*
  118. * SSP Status Register - SSP_SR
  119. */
  120. #define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
  121. #define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
  122. #define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
  123. #define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
  124. #define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
  125. /*
  126. * SSP Clock Prescale Register - SSP_CPSR
  127. */
  128. #define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
  129. /*
  130. * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
  131. */
  132. #define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
  133. #define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */
  134. #define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */
  135. #define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */
  136. /*
  137. * SSP Raw Interrupt Status Register - SSP_RIS
  138. */
  139. /* Receive Overrun Raw Interrupt status */
  140. #define SSP_RIS_MASK_RORRIS (0x1UL << 0)
  141. /* Receive Timeout Raw Interrupt status */
  142. #define SSP_RIS_MASK_RTRIS (0x1UL << 1)
  143. /* Receive FIFO Raw Interrupt status */
  144. #define SSP_RIS_MASK_RXRIS (0x1UL << 2)
  145. /* Transmit FIFO Raw Interrupt status */
  146. #define SSP_RIS_MASK_TXRIS (0x1UL << 3)
  147. /*
  148. * SSP Masked Interrupt Status Register - SSP_MIS
  149. */
  150. /* Receive Overrun Masked Interrupt status */
  151. #define SSP_MIS_MASK_RORMIS (0x1UL << 0)
  152. /* Receive Timeout Masked Interrupt status */
  153. #define SSP_MIS_MASK_RTMIS (0x1UL << 1)
  154. /* Receive FIFO Masked Interrupt status */
  155. #define SSP_MIS_MASK_RXMIS (0x1UL << 2)
  156. /* Transmit FIFO Masked Interrupt status */
  157. #define SSP_MIS_MASK_TXMIS (0x1UL << 3)
  158. /*
  159. * SSP Interrupt Clear Register - SSP_ICR
  160. */
  161. /* Receive Overrun Raw Clear Interrupt bit */
  162. #define SSP_ICR_MASK_RORIC (0x1UL << 0)
  163. /* Receive Timeout Clear Interrupt bit */
  164. #define SSP_ICR_MASK_RTIC (0x1UL << 1)
  165. /*
  166. * SSP DMA Control Register - SSP_DMACR
  167. */
  168. /* Receive DMA Enable bit */
  169. #define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
  170. /* Transmit DMA Enable bit */
  171. #define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
  172. /*
  173. * SSP Integration Test control Register - SSP_ITCR
  174. */
  175. #define SSP_ITCR_MASK_ITEN (0x1UL << 0)
  176. #define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
  177. /*
  178. * SSP Integration Test Input Register - SSP_ITIP
  179. */
  180. #define ITIP_MASK_SSPRXD (0x1UL << 0)
  181. #define ITIP_MASK_SSPFSSIN (0x1UL << 1)
  182. #define ITIP_MASK_SSPCLKIN (0x1UL << 2)
  183. #define ITIP_MASK_RXDMAC (0x1UL << 3)
  184. #define ITIP_MASK_TXDMAC (0x1UL << 4)
  185. #define ITIP_MASK_SSPTXDIN (0x1UL << 5)
  186. /*
  187. * SSP Integration Test output Register - SSP_ITOP
  188. */
  189. #define ITOP_MASK_SSPTXD (0x1UL << 0)
  190. #define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
  191. #define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
  192. #define ITOP_MASK_SSPOEn (0x1UL << 3)
  193. #define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
  194. #define ITOP_MASK_RORINTR (0x1UL << 5)
  195. #define ITOP_MASK_RTINTR (0x1UL << 6)
  196. #define ITOP_MASK_RXINTR (0x1UL << 7)
  197. #define ITOP_MASK_TXINTR (0x1UL << 8)
  198. #define ITOP_MASK_INTR (0x1UL << 9)
  199. #define ITOP_MASK_RXDMABREQ (0x1UL << 10)
  200. #define ITOP_MASK_RXDMASREQ (0x1UL << 11)
  201. #define ITOP_MASK_TXDMABREQ (0x1UL << 12)
  202. #define ITOP_MASK_TXDMASREQ (0x1UL << 13)
  203. /*
  204. * SSP Test Data Register - SSP_TDR
  205. */
  206. #define TDR_MASK_TESTDATA (0xFFFFFFFF)
  207. /*
  208. * Message State
  209. * we use the spi_message.state (void *) pointer to
  210. * hold a single state value, that's why all this
  211. * (void *) casting is done here.
  212. */
  213. #define STATE_START ((void *) 0)
  214. #define STATE_RUNNING ((void *) 1)
  215. #define STATE_DONE ((void *) 2)
  216. #define STATE_ERROR ((void *) -1)
  217. /*
  218. * Queue State
  219. */
  220. #define QUEUE_RUNNING (0)
  221. #define QUEUE_STOPPED (1)
  222. /*
  223. * SSP State - Whether Enabled or Disabled
  224. */
  225. #define SSP_DISABLED (0)
  226. #define SSP_ENABLED (1)
  227. /*
  228. * SSP DMA State - Whether DMA Enabled or Disabled
  229. */
  230. #define SSP_DMA_DISABLED (0)
  231. #define SSP_DMA_ENABLED (1)
  232. /*
  233. * SSP Clock Defaults
  234. */
  235. #define NMDK_SSP_DEFAULT_CLKRATE 0x2
  236. #define NMDK_SSP_DEFAULT_PRESCALE 0x40
  237. /*
  238. * SSP Clock Parameter ranges
  239. */
  240. #define CPSDVR_MIN 0x02
  241. #define CPSDVR_MAX 0xFE
  242. #define SCR_MIN 0x00
  243. #define SCR_MAX 0xFF
  244. /*
  245. * SSP Interrupt related Macros
  246. */
  247. #define DEFAULT_SSP_REG_IMSC 0x0UL
  248. #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
  249. #define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)
  250. #define CLEAR_ALL_INTERRUPTS 0x3
  251. /*
  252. * The type of reading going on on this chip
  253. */
  254. enum ssp_reading {
  255. READING_NULL,
  256. READING_U8,
  257. READING_U16,
  258. READING_U32
  259. };
  260. /**
  261. * The type of writing going on on this chip
  262. */
  263. enum ssp_writing {
  264. WRITING_NULL,
  265. WRITING_U8,
  266. WRITING_U16,
  267. WRITING_U32
  268. };
  269. /**
  270. * struct vendor_data - vendor-specific config parameters
  271. * for PL022 derivates
  272. * @fifodepth: depth of FIFOs (both)
  273. * @max_bpw: maximum number of bits per word
  274. * @unidir: supports unidirection transfers
  275. */
  276. struct vendor_data {
  277. int fifodepth;
  278. int max_bpw;
  279. bool unidir;
  280. };
  281. /**
  282. * struct pl022 - This is the private SSP driver data structure
  283. * @adev: AMBA device model hookup
  284. * @phybase: The physical memory where the SSP device resides
  285. * @virtbase: The virtual memory where the SSP is mapped
  286. * @master: SPI framework hookup
  287. * @master_info: controller-specific data from machine setup
  288. * @regs: SSP controller register's virtual address
  289. * @pump_messages: Work struct for scheduling work to the workqueue
  290. * @lock: spinlock to syncronise access to driver data
  291. * @workqueue: a workqueue on which any spi_message request is queued
  292. * @busy: workqueue is busy
  293. * @run: workqueue is running
  294. * @pump_transfers: Tasklet used in Interrupt Transfer mode
  295. * @cur_msg: Pointer to current spi_message being processed
  296. * @cur_transfer: Pointer to current spi_transfer
  297. * @cur_chip: pointer to current clients chip(assigned from controller_state)
  298. * @tx: current position in TX buffer to be read
  299. * @tx_end: end position in TX buffer to be read
  300. * @rx: current position in RX buffer to be written
  301. * @rx_end: end position in RX buffer to be written
  302. * @readingtype: the type of read currently going on
  303. * @writingtype: the type or write currently going on
  304. */
  305. struct pl022 {
  306. struct amba_device *adev;
  307. struct vendor_data *vendor;
  308. resource_size_t phybase;
  309. void __iomem *virtbase;
  310. struct clk *clk;
  311. struct spi_master *master;
  312. struct pl022_ssp_controller *master_info;
  313. /* Driver message queue */
  314. struct workqueue_struct *workqueue;
  315. struct work_struct pump_messages;
  316. spinlock_t queue_lock;
  317. struct list_head queue;
  318. int busy;
  319. int run;
  320. /* Message transfer pump */
  321. struct tasklet_struct pump_transfers;
  322. struct spi_message *cur_msg;
  323. struct spi_transfer *cur_transfer;
  324. struct chip_data *cur_chip;
  325. void *tx;
  326. void *tx_end;
  327. void *rx;
  328. void *rx_end;
  329. enum ssp_reading read;
  330. enum ssp_writing write;
  331. u32 exp_fifo_level;
  332. };
  333. /**
  334. * struct chip_data - To maintain runtime state of SSP for each client chip
  335. * @cr0: Value of control register CR0 of SSP
  336. * @cr1: Value of control register CR1 of SSP
  337. * @dmacr: Value of DMA control Register of SSP
  338. * @cpsr: Value of Clock prescale register
  339. * @n_bytes: how many bytes(power of 2) reqd for a given data width of client
  340. * @enable_dma: Whether to enable DMA or not
  341. * @write: function ptr to be used to write when doing xfer for this chip
  342. * @read: function ptr to be used to read when doing xfer for this chip
  343. * @cs_control: chip select callback provided by chip
  344. * @xfer_type: polling/interrupt/DMA
  345. *
  346. * Runtime state of the SSP controller, maintained per chip,
  347. * This would be set according to the current message that would be served
  348. */
  349. struct chip_data {
  350. u16 cr0;
  351. u16 cr1;
  352. u16 dmacr;
  353. u16 cpsr;
  354. u8 n_bytes;
  355. u8 enable_dma:1;
  356. enum ssp_reading read;
  357. enum ssp_writing write;
  358. void (*cs_control) (u32 command);
  359. int xfer_type;
  360. };
  361. /**
  362. * null_cs_control - Dummy chip select function
  363. * @command: select/delect the chip
  364. *
  365. * If no chip select function is provided by client this is used as dummy
  366. * chip select
  367. */
  368. static void null_cs_control(u32 command)
  369. {
  370. pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
  371. }
  372. /**
  373. * giveback - current spi_message is over, schedule next message and call
  374. * callback of this message. Assumes that caller already
  375. * set message->status; dma and pio irqs are blocked
  376. * @pl022: SSP driver private data structure
  377. */
  378. static void giveback(struct pl022 *pl022)
  379. {
  380. struct spi_transfer *last_transfer;
  381. unsigned long flags;
  382. struct spi_message *msg;
  383. void (*curr_cs_control) (u32 command);
  384. /*
  385. * This local reference to the chip select function
  386. * is needed because we set curr_chip to NULL
  387. * as a step toward termininating the message.
  388. */
  389. curr_cs_control = pl022->cur_chip->cs_control;
  390. spin_lock_irqsave(&pl022->queue_lock, flags);
  391. msg = pl022->cur_msg;
  392. pl022->cur_msg = NULL;
  393. pl022->cur_transfer = NULL;
  394. pl022->cur_chip = NULL;
  395. queue_work(pl022->workqueue, &pl022->pump_messages);
  396. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  397. last_transfer = list_entry(msg->transfers.prev,
  398. struct spi_transfer,
  399. transfer_list);
  400. /* Delay if requested before any change in chip select */
  401. if (last_transfer->delay_usecs)
  402. /*
  403. * FIXME: This runs in interrupt context.
  404. * Is this really smart?
  405. */
  406. udelay(last_transfer->delay_usecs);
  407. /*
  408. * Drop chip select UNLESS cs_change is true or we are returning
  409. * a message with an error, or next message is for another chip
  410. */
  411. if (!last_transfer->cs_change)
  412. curr_cs_control(SSP_CHIP_DESELECT);
  413. else {
  414. struct spi_message *next_msg;
  415. /* Holding of cs was hinted, but we need to make sure
  416. * the next message is for the same chip. Don't waste
  417. * time with the following tests unless this was hinted.
  418. *
  419. * We cannot postpone this until pump_messages, because
  420. * after calling msg->complete (below) the driver that
  421. * sent the current message could be unloaded, which
  422. * could invalidate the cs_control() callback...
  423. */
  424. /* get a pointer to the next message, if any */
  425. spin_lock_irqsave(&pl022->queue_lock, flags);
  426. if (list_empty(&pl022->queue))
  427. next_msg = NULL;
  428. else
  429. next_msg = list_entry(pl022->queue.next,
  430. struct spi_message, queue);
  431. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  432. /* see if the next and current messages point
  433. * to the same chip
  434. */
  435. if (next_msg && next_msg->spi != msg->spi)
  436. next_msg = NULL;
  437. if (!next_msg || msg->state == STATE_ERROR)
  438. curr_cs_control(SSP_CHIP_DESELECT);
  439. }
  440. msg->state = NULL;
  441. if (msg->complete)
  442. msg->complete(msg->context);
  443. /* This message is completed, so let's turn off the clock! */
  444. clk_disable(pl022->clk);
  445. }
  446. /**
  447. * flush - flush the FIFO to reach a clean state
  448. * @pl022: SSP driver private data structure
  449. */
  450. static int flush(struct pl022 *pl022)
  451. {
  452. unsigned long limit = loops_per_jiffy << 1;
  453. dev_dbg(&pl022->adev->dev, "flush\n");
  454. do {
  455. while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
  456. readw(SSP_DR(pl022->virtbase));
  457. } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
  458. pl022->exp_fifo_level = 0;
  459. return limit;
  460. }
  461. /**
  462. * restore_state - Load configuration of current chip
  463. * @pl022: SSP driver private data structure
  464. */
  465. static void restore_state(struct pl022 *pl022)
  466. {
  467. struct chip_data *chip = pl022->cur_chip;
  468. writew(chip->cr0, SSP_CR0(pl022->virtbase));
  469. writew(chip->cr1, SSP_CR1(pl022->virtbase));
  470. writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
  471. writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
  472. writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  473. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  474. }
  475. /**
  476. * load_ssp_default_config - Load default configuration for SSP
  477. * @pl022: SSP driver private data structure
  478. */
  479. /*
  480. * Default SSP Register Values
  481. */
  482. #define DEFAULT_SSP_REG_CR0 ( \
  483. GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
  484. GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
  485. GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
  486. GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
  487. GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
  488. GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \
  489. GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
  490. )
  491. #define DEFAULT_SSP_REG_CR1 ( \
  492. GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
  493. GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
  494. GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
  495. GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
  496. GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \
  497. GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \
  498. GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\
  499. GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \
  500. GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \
  501. )
  502. #define DEFAULT_SSP_REG_CPSR ( \
  503. GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
  504. )
  505. #define DEFAULT_SSP_REG_DMACR (\
  506. GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
  507. GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
  508. )
  509. static void load_ssp_default_config(struct pl022 *pl022)
  510. {
  511. writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
  512. writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
  513. writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
  514. writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
  515. writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  516. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  517. }
  518. /**
  519. * This will write to TX and read from RX according to the parameters
  520. * set in pl022.
  521. */
  522. static void readwriter(struct pl022 *pl022)
  523. {
  524. /*
  525. * The FIFO depth is different inbetween primecell variants.
  526. * I believe filling in too much in the FIFO might cause
  527. * errons in 8bit wide transfers on ARM variants (just 8 words
  528. * FIFO, means only 8x8 = 64 bits in FIFO) at least.
  529. *
  530. * To prevent this issue, the TX FIFO is only filled to the
  531. * unused RX FIFO fill length, regardless of what the TX
  532. * FIFO status flag indicates.
  533. */
  534. dev_dbg(&pl022->adev->dev,
  535. "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
  536. __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
  537. /* Read as much as you can */
  538. while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
  539. && (pl022->rx < pl022->rx_end)) {
  540. switch (pl022->read) {
  541. case READING_NULL:
  542. readw(SSP_DR(pl022->virtbase));
  543. break;
  544. case READING_U8:
  545. *(u8 *) (pl022->rx) =
  546. readw(SSP_DR(pl022->virtbase)) & 0xFFU;
  547. break;
  548. case READING_U16:
  549. *(u16 *) (pl022->rx) =
  550. (u16) readw(SSP_DR(pl022->virtbase));
  551. break;
  552. case READING_U32:
  553. *(u32 *) (pl022->rx) =
  554. readl(SSP_DR(pl022->virtbase));
  555. break;
  556. }
  557. pl022->rx += (pl022->cur_chip->n_bytes);
  558. pl022->exp_fifo_level--;
  559. }
  560. /*
  561. * Write as much as possible up to the RX FIFO size
  562. */
  563. while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
  564. && (pl022->tx < pl022->tx_end)) {
  565. switch (pl022->write) {
  566. case WRITING_NULL:
  567. writew(0x0, SSP_DR(pl022->virtbase));
  568. break;
  569. case WRITING_U8:
  570. writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
  571. break;
  572. case WRITING_U16:
  573. writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
  574. break;
  575. case WRITING_U32:
  576. writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
  577. break;
  578. }
  579. pl022->tx += (pl022->cur_chip->n_bytes);
  580. pl022->exp_fifo_level++;
  581. /*
  582. * This inner reader takes care of things appearing in the RX
  583. * FIFO as we're transmitting. This will happen a lot since the
  584. * clock starts running when you put things into the TX FIFO,
  585. * and then things are continously clocked into the RX FIFO.
  586. */
  587. while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
  588. && (pl022->rx < pl022->rx_end)) {
  589. switch (pl022->read) {
  590. case READING_NULL:
  591. readw(SSP_DR(pl022->virtbase));
  592. break;
  593. case READING_U8:
  594. *(u8 *) (pl022->rx) =
  595. readw(SSP_DR(pl022->virtbase)) & 0xFFU;
  596. break;
  597. case READING_U16:
  598. *(u16 *) (pl022->rx) =
  599. (u16) readw(SSP_DR(pl022->virtbase));
  600. break;
  601. case READING_U32:
  602. *(u32 *) (pl022->rx) =
  603. readl(SSP_DR(pl022->virtbase));
  604. break;
  605. }
  606. pl022->rx += (pl022->cur_chip->n_bytes);
  607. pl022->exp_fifo_level--;
  608. }
  609. }
  610. /*
  611. * When we exit here the TX FIFO should be full and the RX FIFO
  612. * should be empty
  613. */
  614. }
  615. /**
  616. * next_transfer - Move to the Next transfer in the current spi message
  617. * @pl022: SSP driver private data structure
  618. *
  619. * This function moves though the linked list of spi transfers in the
  620. * current spi message and returns with the state of current spi
  621. * message i.e whether its last transfer is done(STATE_DONE) or
  622. * Next transfer is ready(STATE_RUNNING)
  623. */
  624. static void *next_transfer(struct pl022 *pl022)
  625. {
  626. struct spi_message *msg = pl022->cur_msg;
  627. struct spi_transfer *trans = pl022->cur_transfer;
  628. /* Move to next transfer */
  629. if (trans->transfer_list.next != &msg->transfers) {
  630. pl022->cur_transfer =
  631. list_entry(trans->transfer_list.next,
  632. struct spi_transfer, transfer_list);
  633. return STATE_RUNNING;
  634. }
  635. return STATE_DONE;
  636. }
  637. /**
  638. * pl022_interrupt_handler - Interrupt handler for SSP controller
  639. *
  640. * This function handles interrupts generated for an interrupt based transfer.
  641. * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the
  642. * current message's state as STATE_ERROR and schedule the tasklet
  643. * pump_transfers which will do the postprocessing of the current message by
  644. * calling giveback(). Otherwise it reads data from RX FIFO till there is no
  645. * more data, and writes data in TX FIFO till it is not full. If we complete
  646. * the transfer we move to the next transfer and schedule the tasklet.
  647. */
  648. static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
  649. {
  650. struct pl022 *pl022 = dev_id;
  651. struct spi_message *msg = pl022->cur_msg;
  652. u16 irq_status = 0;
  653. u16 flag = 0;
  654. if (unlikely(!msg)) {
  655. dev_err(&pl022->adev->dev,
  656. "bad message state in interrupt handler");
  657. /* Never fail */
  658. return IRQ_HANDLED;
  659. }
  660. /* Read the Interrupt Status Register */
  661. irq_status = readw(SSP_MIS(pl022->virtbase));
  662. if (unlikely(!irq_status))
  663. return IRQ_NONE;
  664. /* This handles the error code interrupts */
  665. if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
  666. /*
  667. * Overrun interrupt - bail out since our Data has been
  668. * corrupted
  669. */
  670. dev_err(&pl022->adev->dev,
  671. "FIFO overrun\n");
  672. if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
  673. dev_err(&pl022->adev->dev,
  674. "RXFIFO is full\n");
  675. if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
  676. dev_err(&pl022->adev->dev,
  677. "TXFIFO is full\n");
  678. /*
  679. * Disable and clear interrupts, disable SSP,
  680. * mark message with bad status so it can be
  681. * retried.
  682. */
  683. writew(DISABLE_ALL_INTERRUPTS,
  684. SSP_IMSC(pl022->virtbase));
  685. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  686. writew((readw(SSP_CR1(pl022->virtbase)) &
  687. (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
  688. msg->state = STATE_ERROR;
  689. /* Schedule message queue handler */
  690. tasklet_schedule(&pl022->pump_transfers);
  691. return IRQ_HANDLED;
  692. }
  693. readwriter(pl022);
  694. if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
  695. flag = 1;
  696. /* Disable Transmit interrupt */
  697. writew(readw(SSP_IMSC(pl022->virtbase)) &
  698. (~SSP_IMSC_MASK_TXIM),
  699. SSP_IMSC(pl022->virtbase));
  700. }
  701. /*
  702. * Since all transactions must write as much as shall be read,
  703. * we can conclude the entire transaction once RX is complete.
  704. * At this point, all TX will always be finished.
  705. */
  706. if (pl022->rx >= pl022->rx_end) {
  707. writew(DISABLE_ALL_INTERRUPTS,
  708. SSP_IMSC(pl022->virtbase));
  709. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  710. if (unlikely(pl022->rx > pl022->rx_end)) {
  711. dev_warn(&pl022->adev->dev, "read %u surplus "
  712. "bytes (did you request an odd "
  713. "number of bytes on a 16bit bus?)\n",
  714. (u32) (pl022->rx - pl022->rx_end));
  715. }
  716. /* Update total bytes transfered */
  717. msg->actual_length += pl022->cur_transfer->len;
  718. if (pl022->cur_transfer->cs_change)
  719. pl022->cur_chip->
  720. cs_control(SSP_CHIP_DESELECT);
  721. /* Move to next transfer */
  722. msg->state = next_transfer(pl022);
  723. tasklet_schedule(&pl022->pump_transfers);
  724. return IRQ_HANDLED;
  725. }
  726. return IRQ_HANDLED;
  727. }
  728. /**
  729. * This sets up the pointers to memory for the next message to
  730. * send out on the SPI bus.
  731. */
  732. static int set_up_next_transfer(struct pl022 *pl022,
  733. struct spi_transfer *transfer)
  734. {
  735. int residue;
  736. /* Sanity check the message for this bus width */
  737. residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
  738. if (unlikely(residue != 0)) {
  739. dev_err(&pl022->adev->dev,
  740. "message of %u bytes to transmit but the current "
  741. "chip bus has a data width of %u bytes!\n",
  742. pl022->cur_transfer->len,
  743. pl022->cur_chip->n_bytes);
  744. dev_err(&pl022->adev->dev, "skipping this message\n");
  745. return -EIO;
  746. }
  747. pl022->tx = (void *)transfer->tx_buf;
  748. pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
  749. pl022->rx = (void *)transfer->rx_buf;
  750. pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
  751. pl022->write =
  752. pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
  753. pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
  754. return 0;
  755. }
  756. /**
  757. * pump_transfers - Tasklet function which schedules next interrupt transfer
  758. * when running in interrupt transfer mode.
  759. * @data: SSP driver private data structure
  760. *
  761. */
  762. static void pump_transfers(unsigned long data)
  763. {
  764. struct pl022 *pl022 = (struct pl022 *) data;
  765. struct spi_message *message = NULL;
  766. struct spi_transfer *transfer = NULL;
  767. struct spi_transfer *previous = NULL;
  768. /* Get current state information */
  769. message = pl022->cur_msg;
  770. transfer = pl022->cur_transfer;
  771. /* Handle for abort */
  772. if (message->state == STATE_ERROR) {
  773. message->status = -EIO;
  774. giveback(pl022);
  775. return;
  776. }
  777. /* Handle end of message */
  778. if (message->state == STATE_DONE) {
  779. message->status = 0;
  780. giveback(pl022);
  781. return;
  782. }
  783. /* Delay if requested at end of transfer before CS change */
  784. if (message->state == STATE_RUNNING) {
  785. previous = list_entry(transfer->transfer_list.prev,
  786. struct spi_transfer,
  787. transfer_list);
  788. if (previous->delay_usecs)
  789. /*
  790. * FIXME: This runs in interrupt context.
  791. * Is this really smart?
  792. */
  793. udelay(previous->delay_usecs);
  794. /* Drop chip select only if cs_change is requested */
  795. if (previous->cs_change)
  796. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  797. } else {
  798. /* STATE_START */
  799. message->state = STATE_RUNNING;
  800. }
  801. if (set_up_next_transfer(pl022, transfer)) {
  802. message->state = STATE_ERROR;
  803. message->status = -EIO;
  804. giveback(pl022);
  805. return;
  806. }
  807. /* Flush the FIFOs and let's go! */
  808. flush(pl022);
  809. writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  810. }
  811. /**
  812. * NOT IMPLEMENTED
  813. * configure_dma - It configures the DMA pipes for DMA transfers
  814. * @data: SSP driver's private data structure
  815. *
  816. */
  817. static int configure_dma(void *data)
  818. {
  819. struct pl022 *pl022 = data;
  820. dev_dbg(&pl022->adev->dev, "configure DMA\n");
  821. return -ENOTSUPP;
  822. }
  823. /**
  824. * do_dma_transfer - It handles transfers of the current message
  825. * if it is DMA xfer.
  826. * NOT FULLY IMPLEMENTED
  827. * @data: SSP driver's private data structure
  828. */
  829. static void do_dma_transfer(void *data)
  830. {
  831. struct pl022 *pl022 = data;
  832. if (configure_dma(data)) {
  833. dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n");
  834. goto err_config_dma;
  835. }
  836. /* TODO: Implememt DMA setup of pipes here */
  837. /* Enable target chip, set up transfer */
  838. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  839. if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
  840. /* Error path */
  841. pl022->cur_msg->state = STATE_ERROR;
  842. pl022->cur_msg->status = -EIO;
  843. giveback(pl022);
  844. return;
  845. }
  846. /* Enable SSP */
  847. writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
  848. SSP_CR1(pl022->virtbase));
  849. /* TODO: Enable the DMA transfer here */
  850. return;
  851. err_config_dma:
  852. pl022->cur_msg->state = STATE_ERROR;
  853. pl022->cur_msg->status = -EIO;
  854. giveback(pl022);
  855. return;
  856. }
  857. static void do_interrupt_transfer(void *data)
  858. {
  859. struct pl022 *pl022 = data;
  860. /* Enable target chip */
  861. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  862. if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
  863. /* Error path */
  864. pl022->cur_msg->state = STATE_ERROR;
  865. pl022->cur_msg->status = -EIO;
  866. giveback(pl022);
  867. return;
  868. }
  869. /* Enable SSP, turn on interrupts */
  870. writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
  871. SSP_CR1(pl022->virtbase));
  872. writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  873. }
  874. static void do_polling_transfer(void *data)
  875. {
  876. struct pl022 *pl022 = data;
  877. struct spi_message *message = NULL;
  878. struct spi_transfer *transfer = NULL;
  879. struct spi_transfer *previous = NULL;
  880. struct chip_data *chip;
  881. chip = pl022->cur_chip;
  882. message = pl022->cur_msg;
  883. while (message->state != STATE_DONE) {
  884. /* Handle for abort */
  885. if (message->state == STATE_ERROR)
  886. break;
  887. transfer = pl022->cur_transfer;
  888. /* Delay if requested at end of transfer */
  889. if (message->state == STATE_RUNNING) {
  890. previous =
  891. list_entry(transfer->transfer_list.prev,
  892. struct spi_transfer, transfer_list);
  893. if (previous->delay_usecs)
  894. udelay(previous->delay_usecs);
  895. if (previous->cs_change)
  896. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  897. } else {
  898. /* STATE_START */
  899. message->state = STATE_RUNNING;
  900. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  901. }
  902. /* Configuration Changing Per Transfer */
  903. if (set_up_next_transfer(pl022, transfer)) {
  904. /* Error path */
  905. message->state = STATE_ERROR;
  906. break;
  907. }
  908. /* Flush FIFOs and enable SSP */
  909. flush(pl022);
  910. writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
  911. SSP_CR1(pl022->virtbase));
  912. dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... \n");
  913. /* FIXME: insert a timeout so we don't hang here indefinately */
  914. while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
  915. readwriter(pl022);
  916. /* Update total byte transfered */
  917. message->actual_length += pl022->cur_transfer->len;
  918. if (pl022->cur_transfer->cs_change)
  919. pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
  920. /* Move to next transfer */
  921. message->state = next_transfer(pl022);
  922. }
  923. /* Handle end of message */
  924. if (message->state == STATE_DONE)
  925. message->status = 0;
  926. else
  927. message->status = -EIO;
  928. giveback(pl022);
  929. return;
  930. }
  931. /**
  932. * pump_messages - Workqueue function which processes spi message queue
  933. * @data: pointer to private data of SSP driver
  934. *
  935. * This function checks if there is any spi message in the queue that
  936. * needs processing and delegate control to appropriate function
  937. * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer()
  938. * based on the kind of the transfer
  939. *
  940. */
  941. static void pump_messages(struct work_struct *work)
  942. {
  943. struct pl022 *pl022 =
  944. container_of(work, struct pl022, pump_messages);
  945. unsigned long flags;
  946. /* Lock queue and check for queue work */
  947. spin_lock_irqsave(&pl022->queue_lock, flags);
  948. if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
  949. pl022->busy = 0;
  950. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  951. return;
  952. }
  953. /* Make sure we are not already running a message */
  954. if (pl022->cur_msg) {
  955. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  956. return;
  957. }
  958. /* Extract head of queue */
  959. pl022->cur_msg =
  960. list_entry(pl022->queue.next, struct spi_message, queue);
  961. list_del_init(&pl022->cur_msg->queue);
  962. pl022->busy = 1;
  963. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  964. /* Initial message state */
  965. pl022->cur_msg->state = STATE_START;
  966. pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
  967. struct spi_transfer,
  968. transfer_list);
  969. /* Setup the SPI using the per chip configuration */
  970. pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
  971. /*
  972. * We enable the clock here, then the clock will be disabled when
  973. * giveback() is called in each method (poll/interrupt/DMA)
  974. */
  975. clk_enable(pl022->clk);
  976. restore_state(pl022);
  977. flush(pl022);
  978. if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
  979. do_polling_transfer(pl022);
  980. else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER)
  981. do_interrupt_transfer(pl022);
  982. else
  983. do_dma_transfer(pl022);
  984. }
  985. static int __init init_queue(struct pl022 *pl022)
  986. {
  987. INIT_LIST_HEAD(&pl022->queue);
  988. spin_lock_init(&pl022->queue_lock);
  989. pl022->run = QUEUE_STOPPED;
  990. pl022->busy = 0;
  991. tasklet_init(&pl022->pump_transfers,
  992. pump_transfers, (unsigned long)pl022);
  993. INIT_WORK(&pl022->pump_messages, pump_messages);
  994. pl022->workqueue = create_singlethread_workqueue(
  995. dev_name(pl022->master->dev.parent));
  996. if (pl022->workqueue == NULL)
  997. return -EBUSY;
  998. return 0;
  999. }
  1000. static int start_queue(struct pl022 *pl022)
  1001. {
  1002. unsigned long flags;
  1003. spin_lock_irqsave(&pl022->queue_lock, flags);
  1004. if (pl022->run == QUEUE_RUNNING || pl022->busy) {
  1005. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1006. return -EBUSY;
  1007. }
  1008. pl022->run = QUEUE_RUNNING;
  1009. pl022->cur_msg = NULL;
  1010. pl022->cur_transfer = NULL;
  1011. pl022->cur_chip = NULL;
  1012. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1013. queue_work(pl022->workqueue, &pl022->pump_messages);
  1014. return 0;
  1015. }
  1016. static int stop_queue(struct pl022 *pl022)
  1017. {
  1018. unsigned long flags;
  1019. unsigned limit = 500;
  1020. int status = 0;
  1021. spin_lock_irqsave(&pl022->queue_lock, flags);
  1022. /* This is a bit lame, but is optimized for the common execution path.
  1023. * A wait_queue on the pl022->busy could be used, but then the common
  1024. * execution path (pump_messages) would be required to call wake_up or
  1025. * friends on every SPI message. Do this instead */
  1026. pl022->run = QUEUE_STOPPED;
  1027. while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
  1028. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1029. msleep(10);
  1030. spin_lock_irqsave(&pl022->queue_lock, flags);
  1031. }
  1032. if (!list_empty(&pl022->queue) || pl022->busy)
  1033. status = -EBUSY;
  1034. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1035. return status;
  1036. }
  1037. static int destroy_queue(struct pl022 *pl022)
  1038. {
  1039. int status;
  1040. status = stop_queue(pl022);
  1041. /* we are unloading the module or failing to load (only two calls
  1042. * to this routine), and neither call can handle a return value.
  1043. * However, destroy_workqueue calls flush_workqueue, and that will
  1044. * block until all work is done. If the reason that stop_queue
  1045. * timed out is that the work will never finish, then it does no
  1046. * good to call destroy_workqueue, so return anyway. */
  1047. if (status != 0)
  1048. return status;
  1049. destroy_workqueue(pl022->workqueue);
  1050. return 0;
  1051. }
  1052. static int verify_controller_parameters(struct pl022 *pl022,
  1053. struct pl022_config_chip *chip_info)
  1054. {
  1055. if ((chip_info->lbm != LOOPBACK_ENABLED)
  1056. && (chip_info->lbm != LOOPBACK_DISABLED)) {
  1057. dev_err(chip_info->dev,
  1058. "loopback Mode is configured incorrectly\n");
  1059. return -EINVAL;
  1060. }
  1061. if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
  1062. || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
  1063. dev_err(chip_info->dev,
  1064. "interface is configured incorrectly\n");
  1065. return -EINVAL;
  1066. }
  1067. if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
  1068. (!pl022->vendor->unidir)) {
  1069. dev_err(chip_info->dev,
  1070. "unidirectional mode not supported in this "
  1071. "hardware version\n");
  1072. return -EINVAL;
  1073. }
  1074. if ((chip_info->hierarchy != SSP_MASTER)
  1075. && (chip_info->hierarchy != SSP_SLAVE)) {
  1076. dev_err(chip_info->dev,
  1077. "hierarchy is configured incorrectly\n");
  1078. return -EINVAL;
  1079. }
  1080. if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN)
  1081. || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) {
  1082. dev_err(chip_info->dev,
  1083. "cpsdvsr is configured incorrectly\n");
  1084. return -EINVAL;
  1085. }
  1086. if ((chip_info->endian_rx != SSP_RX_MSB)
  1087. && (chip_info->endian_rx != SSP_RX_LSB)) {
  1088. dev_err(chip_info->dev,
  1089. "RX FIFO endianess is configured incorrectly\n");
  1090. return -EINVAL;
  1091. }
  1092. if ((chip_info->endian_tx != SSP_TX_MSB)
  1093. && (chip_info->endian_tx != SSP_TX_LSB)) {
  1094. dev_err(chip_info->dev,
  1095. "TX FIFO endianess is configured incorrectly\n");
  1096. return -EINVAL;
  1097. }
  1098. if ((chip_info->data_size < SSP_DATA_BITS_4)
  1099. || (chip_info->data_size > SSP_DATA_BITS_32)) {
  1100. dev_err(chip_info->dev,
  1101. "DATA Size is configured incorrectly\n");
  1102. return -EINVAL;
  1103. }
  1104. if ((chip_info->com_mode != INTERRUPT_TRANSFER)
  1105. && (chip_info->com_mode != DMA_TRANSFER)
  1106. && (chip_info->com_mode != POLLING_TRANSFER)) {
  1107. dev_err(chip_info->dev,
  1108. "Communication mode is configured incorrectly\n");
  1109. return -EINVAL;
  1110. }
  1111. if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
  1112. || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
  1113. dev_err(chip_info->dev,
  1114. "RX FIFO Trigger Level is configured incorrectly\n");
  1115. return -EINVAL;
  1116. }
  1117. if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
  1118. || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
  1119. dev_err(chip_info->dev,
  1120. "TX FIFO Trigger Level is configured incorrectly\n");
  1121. return -EINVAL;
  1122. }
  1123. if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
  1124. if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE)
  1125. && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) {
  1126. dev_err(chip_info->dev,
  1127. "Clock Phase is configured incorrectly\n");
  1128. return -EINVAL;
  1129. }
  1130. if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW)
  1131. && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) {
  1132. dev_err(chip_info->dev,
  1133. "Clock Polarity is configured incorrectly\n");
  1134. return -EINVAL;
  1135. }
  1136. }
  1137. if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
  1138. if ((chip_info->ctrl_len < SSP_BITS_4)
  1139. || (chip_info->ctrl_len > SSP_BITS_32)) {
  1140. dev_err(chip_info->dev,
  1141. "CTRL LEN is configured incorrectly\n");
  1142. return -EINVAL;
  1143. }
  1144. if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
  1145. && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
  1146. dev_err(chip_info->dev,
  1147. "Wait State is configured incorrectly\n");
  1148. return -EINVAL;
  1149. }
  1150. if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
  1151. && (chip_info->duplex !=
  1152. SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
  1153. dev_err(chip_info->dev,
  1154. "DUPLEX is configured incorrectly\n");
  1155. return -EINVAL;
  1156. }
  1157. }
  1158. if (chip_info->cs_control == NULL) {
  1159. dev_warn(chip_info->dev,
  1160. "Chip Select Function is NULL for this chip\n");
  1161. chip_info->cs_control = null_cs_control;
  1162. }
  1163. return 0;
  1164. }
  1165. /**
  1166. * pl022_transfer - transfer function registered to SPI master framework
  1167. * @spi: spi device which is requesting transfer
  1168. * @msg: spi message which is to handled is queued to driver queue
  1169. *
  1170. * This function is registered to the SPI framework for this SPI master
  1171. * controller. It will queue the spi_message in the queue of driver if
  1172. * the queue is not stopped and return.
  1173. */
  1174. static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
  1175. {
  1176. struct pl022 *pl022 = spi_master_get_devdata(spi->master);
  1177. unsigned long flags;
  1178. spin_lock_irqsave(&pl022->queue_lock, flags);
  1179. if (pl022->run == QUEUE_STOPPED) {
  1180. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1181. return -ESHUTDOWN;
  1182. }
  1183. msg->actual_length = 0;
  1184. msg->status = -EINPROGRESS;
  1185. msg->state = STATE_START;
  1186. list_add_tail(&msg->queue, &pl022->queue);
  1187. if (pl022->run == QUEUE_RUNNING && !pl022->busy)
  1188. queue_work(pl022->workqueue, &pl022->pump_messages);
  1189. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1190. return 0;
  1191. }
  1192. static int calculate_effective_freq(struct pl022 *pl022,
  1193. int freq,
  1194. struct ssp_clock_params *clk_freq)
  1195. {
  1196. /* Lets calculate the frequency parameters */
  1197. u16 cpsdvsr = 2;
  1198. u16 scr = 0;
  1199. bool freq_found = false;
  1200. u32 rate;
  1201. u32 max_tclk;
  1202. u32 min_tclk;
  1203. rate = clk_get_rate(pl022->clk);
  1204. /* cpsdvscr = 2 & scr 0 */
  1205. max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN)));
  1206. /* cpsdvsr = 254 & scr = 255 */
  1207. min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX)));
  1208. if ((freq <= max_tclk) && (freq >= min_tclk)) {
  1209. while (cpsdvsr <= CPSDVR_MAX && !freq_found) {
  1210. while (scr <= SCR_MAX && !freq_found) {
  1211. if ((rate /
  1212. (cpsdvsr * (1 + scr))) > freq)
  1213. scr += 1;
  1214. else {
  1215. /*
  1216. * This bool is made true when
  1217. * effective frequency >=
  1218. * target frequency is found
  1219. */
  1220. freq_found = true;
  1221. if ((rate /
  1222. (cpsdvsr * (1 + scr))) != freq) {
  1223. if (scr == SCR_MIN) {
  1224. cpsdvsr -= 2;
  1225. scr = SCR_MAX;
  1226. } else
  1227. scr -= 1;
  1228. }
  1229. }
  1230. }
  1231. if (!freq_found) {
  1232. cpsdvsr += 2;
  1233. scr = SCR_MIN;
  1234. }
  1235. }
  1236. if (cpsdvsr != 0) {
  1237. dev_dbg(&pl022->adev->dev,
  1238. "SSP Effective Frequency is %u\n",
  1239. (rate / (cpsdvsr * (1 + scr))));
  1240. clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF);
  1241. clk_freq->scr = (u8) (scr & 0xFF);
  1242. dev_dbg(&pl022->adev->dev,
  1243. "SSP cpsdvsr = %d, scr = %d\n",
  1244. clk_freq->cpsdvsr, clk_freq->scr);
  1245. }
  1246. } else {
  1247. dev_err(&pl022->adev->dev,
  1248. "controller data is incorrect: out of range frequency");
  1249. return -EINVAL;
  1250. }
  1251. return 0;
  1252. }
  1253. /**
  1254. * NOT IMPLEMENTED
  1255. * process_dma_info - Processes the DMA info provided by client drivers
  1256. * @chip_info: chip info provided by client device
  1257. * @chip: Runtime state maintained by the SSP controller for each spi device
  1258. *
  1259. * This function processes and stores DMA config provided by client driver
  1260. * into the runtime state maintained by the SSP controller driver
  1261. */
  1262. static int process_dma_info(struct pl022_config_chip *chip_info,
  1263. struct chip_data *chip)
  1264. {
  1265. dev_err(chip_info->dev,
  1266. "cannot process DMA info, DMA not implemented!\n");
  1267. return -ENOTSUPP;
  1268. }
  1269. /**
  1270. * pl022_setup - setup function registered to SPI master framework
  1271. * @spi: spi device which is requesting setup
  1272. *
  1273. * This function is registered to the SPI framework for this SPI master
  1274. * controller. If it is the first time when setup is called by this device,
  1275. * this function will initialize the runtime state for this chip and save
  1276. * the same in the device structure. Else it will update the runtime info
  1277. * with the updated chip info. Nothing is really being written to the
  1278. * controller hardware here, that is not done until the actual transfer
  1279. * commence.
  1280. */
  1281. /* FIXME: JUST GUESSING the spi->mode bits understood by this driver */
  1282. #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
  1283. | SPI_LSB_FIRST | SPI_LOOP)
static int pl022_setup(struct spi_device *spi)
{
	struct pl022_config_chip *chip_info;
	struct chip_data *chip;
	int status = 0;
	struct pl022 *pl022 = spi_master_get_devdata(spi->master);

	if (spi->mode & ~MODEBITS) {
		dev_dbg(&spi->dev, "unsupported mode bits %x\n",
			spi->mode & ~MODEBITS);
		return -EINVAL;
	}

	if (!spi->max_speed_hz)
		return -EINVAL;

	/* Get controller_state if one is supplied */
	chip = spi_get_ctldata(spi);

	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"cannot allocate controller state\n");
			return -ENOMEM;
		}
		dev_dbg(&spi->dev,
			"allocated memory for controller's runtime state\n");
	}

	/* Get controller data if one is supplied */
	chip_info = spi->controller_data;

	if (chip_info == NULL) {
		/* spi_board_info.controller_data is not supplied */
		dev_dbg(&spi->dev,
			"using default controller_data settings\n");

		chip_info =
			kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL);
		if (!chip_info) {
			dev_err(&spi->dev,
				"cannot allocate controller data\n");
			status = -ENOMEM;
			goto err_first_setup;
		}

		dev_dbg(&spi->dev, "allocated memory for controller data\n");

		/* Pointer back to the SPI device */
		chip_info->dev = &spi->dev;
		/*
		 * Set controller data default values:
		 * polling transfers are used by default
		 */
		chip_info->lbm = LOOPBACK_DISABLED;
		chip_info->com_mode = POLLING_TRANSFER;
		chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI;
		chip_info->hierarchy = SSP_SLAVE;
		chip_info->slave_tx_disable = DO_NOT_DRIVE_TX;
		chip_info->endian_tx = SSP_TX_LSB;
		chip_info->endian_rx = SSP_RX_LSB;
		chip_info->data_size = SSP_DATA_BITS_12;
		chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
		chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
		chip_info->clk_phase = SSP_CLK_SECOND_EDGE;
		chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
		chip_info->ctrl_len = SSP_BITS_8;
		chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
		chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX;
		chip_info->cs_control = null_cs_control;
	} else {
		dev_dbg(&spi->dev,
			"using user supplied controller_data settings\n");
	}
	/*
	 * We can override with custom divisors, else we use the board
	 * frequency setting
	 */
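	/*
	 * The effective bit rate comes out as SSPCLK / (CPSDVSR * (1 + SCR)),
	 * where CPSDVSR must be an even value between 2 and 254 and SCR a
	 * value between 0 and 255 (see the PL022 TRM).
	 */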
	if ((0 == chip_info->clk_freq.cpsdvsr)
	    && (0 == chip_info->clk_freq.scr)) {
		status = calculate_effective_freq(pl022,
						  spi->max_speed_hz,
						  &chip_info->clk_freq);
		if (status < 0)
			goto err_config_params;
	} else {
		if ((chip_info->clk_freq.cpsdvsr % 2) != 0)
			chip_info->clk_freq.cpsdvsr =
				chip_info->clk_freq.cpsdvsr - 1;
	}

	status = verify_controller_parameters(pl022, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect\n");
		goto err_config_params;
	}

	/* Now set controller state based on controller data */
	chip->xfer_type = chip_info->com_mode;
	chip->cs_control = chip_info->cs_control;

	if (chip_info->data_size <= 8) {
		dev_dbg(&spi->dev, "1 <= n <= 8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (chip_info->data_size <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		if (pl022->vendor->max_bpw >= 32) {
			dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
			chip->n_bytes = 4;
			chip->read = READING_U32;
			chip->write = WRITING_U32;
		} else {
			dev_err(&spi->dev,
				"illegal data size for this controller!\n");
			dev_err(&spi->dev,
				"a standard pl022 can only handle "
				"1 <= n <= 16 bit words\n");
			status = -EINVAL;
			goto err_config_params;
		}
	}
	/*
	 * Now initialize all register settings required for this chip.
	 * These are only cached images held in the runtime state; they are
	 * written to the hardware when a transfer is actually started.
	 */
	chip->cr0 = 0;
	chip->cr1 = 0;
	chip->dmacr = 0;
	chip->cpsr = 0;
	if ((chip_info->com_mode == DMA_TRANSFER)
	    && ((pl022->master_info)->enable_dma)) {
		chip->enable_dma = 1;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
		status = process_dma_info(chip_info, chip);
		if (status < 0)
			goto err_config_params;
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	} else {
		chip->enable_dma = 0;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	}

	chip->cpsr = chip_info->clk_freq.cpsdvsr;

	SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0);
	SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5);
	SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
	SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
	SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
	SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16);
	SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21);
	SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
	SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
	SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
	SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
	SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4);
	SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5);
	SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6);
	SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7);
	SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10);

	/* Save controller_state */
	spi_set_ctldata(spi, chip);
	return status;
err_config_params:
err_first_setup:
	kfree(chip);
	return status;
}
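
/*
 * Illustrative sketch (not compiled) of how a board file might supply
 * controller data for one chip select through spi_board_info, as consumed
 * by pl022_setup() above.  The cs_control callback, GPIO number, modalias
 * and speed below are hypothetical examples, not part of this driver.
 */
#if 0
#define BOARD_SPI_CS_GPIO	42	/* hypothetical chip select GPIO */

static void board_cs_control(u32 command)
{
	/* Drive the chip select line for this device */
	gpio_set_value(BOARD_SPI_CS_GPIO,
		       command == SSP_CHIP_SELECT ? 0 : 1);
}

static struct pl022_config_chip board_chip_info = {
	/*
	 * clk_freq is left zeroed so the divisors are calculated from
	 * spi_board_info.max_speed_hz by calculate_effective_freq()
	 */
	.lbm = LOOPBACK_DISABLED,
	.com_mode = POLLING_TRANSFER,
	.iface = SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SSP_MASTER,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.endian_tx = SSP_TX_MSB,
	.endian_rx = SSP_RX_MSB,
	.data_size = SSP_DATA_BITS_8,
	.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
	.clk_phase = SSP_CLK_SECOND_EDGE,
	.clk_pol = SSP_CLK_POL_IDLE_LOW,
	.ctrl_len = SSP_BITS_8,
	.wait_state = SSP_MWIRE_WAIT_ZERO,
	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
	.cs_control = board_cs_control,
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "spidev",	/* example client driver */
		.controller_data = &board_chip_info,
		.max_speed_hz	= 1000000,
		.bus_num	= 0,	/* must match platform data bus_id */
		.chip_select	= 0,
		.mode		= SPI_MODE_3,
	},
};

/* Registered from the board init code: */
/* spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices)); */
#endif
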
/**
 * pl022_cleanup - cleanup function registered to SPI master framework
 * @spi: spi device which is requesting cleanup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. It will free the runtime state of the chip.
 */
static void pl022_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	spi_set_ctldata(spi, NULL);
	kfree(chip);
}

static int __init
pl022_probe(struct amba_device *adev, struct amba_id *id)
{
	struct device *dev = &adev->dev;
	struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
	struct spi_master *master;
	struct pl022 *pl022 = NULL;	/* Data for this driver */
	int status = 0;

	dev_info(&adev->dev,
		 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
	if (platform_info == NULL) {
		dev_err(&adev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct pl022));
	if (master == NULL) {
		dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
		status = -ENOMEM;
		goto err_no_master;
	}

	pl022 = spi_master_get_devdata(master);
	pl022->master = master;
	pl022->master_info = platform_info;
	pl022->adev = adev;
	pl022->vendor = id->data;

	/*
	 * Bus number which has been assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = pl022_cleanup;
	master->setup = pl022_setup;
	master->transfer = pl022_transfer;
	dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);

	status = amba_request_regions(adev, NULL);
	if (status)
		goto err_no_ioregion;

	pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
	if (pl022->virtbase == NULL) {
		status = -ENOMEM;
		goto err_no_ioremap;
	}
	printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
	       adev->res.start, pl022->virtbase);

	pl022->clk = clk_get(&adev->dev, NULL);
	if (IS_ERR(pl022->clk)) {
		status = PTR_ERR(pl022->clk);
		dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
		goto err_no_clk;
	}

	/* Disable SSP */
	clk_enable(pl022->clk);
	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
	       SSP_CR1(pl022->virtbase));
	load_ssp_default_config(pl022);
	clk_disable(pl022->clk);

	status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
			     pl022);
	if (status < 0) {
		dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irq;
	}

	/* Initialize and start queue */
	status = init_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev, "probe - problem initializing queue\n");
		goto err_init_queue;
	}
	status = start_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev, "probe - problem starting queue\n");
		goto err_start_queue;
	}

	/* Register with the SPI framework */
	amba_set_drvdata(adev, pl022);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&adev->dev,
			"probe - problem registering spi master\n");
		goto err_spi_register;
	}
	dev_dbg(dev, "probe succeeded\n");
	return 0;
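	/*
	 * Tear down in the reverse order of the setup steps above; each
	 * label skips the steps that never completed.
	 */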
err_spi_register:
err_start_queue:
err_init_queue:
	destroy_queue(pl022);
	free_irq(adev->irq[0], pl022);
err_no_irq:
	clk_put(pl022->clk);
err_no_clk:
	iounmap(pl022->virtbase);
err_no_ioremap:
	amba_release_regions(adev);
err_no_ioregion:
	spi_master_put(master);
err_no_master:
err_no_pdata:
	return status;
}
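
/*
 * Illustrative sketch (not compiled) of the platform data a board file
 * might attach to the AMBA device for this controller before registering
 * it.  The variable name and values below are hypothetical examples.
 */
#if 0
static struct pl022_ssp_controller board_ssp_plat_data = {
	.bus_id = 0,		/* SPI bus number handed to the SPI core */
	.num_chipselect = 2,	/* chip selects wired up on this board */
	.enable_dma = 0,	/* DMA is not implemented in this driver yet */
};

/* In the board setup code, before amba_device_register(): */
/* ssp_device.dev.platform_data = &board_ssp_plat_data; */
#endif
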
static int __exit
pl022_remove(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;

	if (!pl022)
		return 0;

	/* Remove the queue */
	status = destroy_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev,
			"queue remove failed (%d)\n", status);
		return status;
	}
	load_ssp_default_config(pl022);
	free_irq(adev->irq[0], pl022);
	clk_disable(pl022->clk);
	clk_put(pl022->clk);
	iounmap(pl022->virtbase);
	amba_release_regions(adev);
	tasklet_disable(&pl022->pump_transfers);
	spi_unregister_master(pl022->master);
	spi_master_put(pl022->master);
	amba_set_drvdata(adev, NULL);
	dev_dbg(&adev->dev, "remove succeeded\n");
	return 0;
}

#ifdef CONFIG_PM
static int pl022_suspend(struct amba_device *adev, pm_message_t state)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;

	status = stop_queue(pl022);
	if (status) {
		dev_warn(&adev->dev, "suspend cannot stop queue\n");
		return status;
	}

	clk_enable(pl022->clk);
	load_ssp_default_config(pl022);
	clk_disable(pl022->clk);
	dev_dbg(&adev->dev, "suspended\n");
	return 0;
}

static int pl022_resume(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;

	/* Start the queue running */
	status = start_queue(pl022);
	if (status)
		dev_err(&adev->dev, "problem starting queue (%d)\n", status);
	else
		dev_dbg(&adev->dev, "resumed\n");

	return status;
}
#else
#define pl022_suspend	NULL
#define pl022_resume	NULL
#endif	/* CONFIG_PM */

static struct vendor_data vendor_arm = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
};

static struct vendor_data vendor_st = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
};

static struct amba_id pl022_ids[] = {
	{
		/*
		 * ARM PL022 variant, this has a 16-bit wide
		 * and 8-location deep TX/RX FIFO
		 */
		.id	= 0x00041022,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		/*
		 * ST Micro derivative, this has a 32-bit wide
		 * and 32-location deep TX/RX FIFO
		 */
		.id	= 0x01080022,
		.mask	= 0xffffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },
};

static struct amba_driver pl022_driver = {
	.drv = {
		.name	= "ssp-pl022",
	},
	.id_table	= pl022_ids,
	.probe		= pl022_probe,
	.remove		= __exit_p(pl022_remove),
	.suspend	= pl022_suspend,
	.resume		= pl022_resume,
};

static int __init pl022_init(void)
{
	return amba_driver_register(&pl022_driver);
}

module_init(pl022_init);

static void __exit pl022_exit(void)
{
	amba_driver_unregister(&pl022_driver);
}

module_exit(pl022_exit);

MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("PL022 SSP Controller Driver");
MODULE_LICENSE("GPL");