amba-pl022.c

  1. /*
  2. * drivers/spi/amba-pl022.c
  3. *
  4. * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
  5. *
  6. * Copyright (C) 2008-2009 ST-Ericsson AB
  7. * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
  8. *
  9. * Author: Linus Walleij <linus.walleij@stericsson.com>
  10. *
  11. * Initial version inspired by:
  12. * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
  13. * Initial adoption to PL022 by:
  14. * Sachin Verma <sachin.verma@st.com>
  15. *
  16. * This program is free software; you can redistribute it and/or modify
  17. * it under the terms of the GNU General Public License as published by
  18. * the Free Software Foundation; either version 2 of the License, or
  19. * (at your option) any later version.
  20. *
  21. * This program is distributed in the hope that it will be useful,
  22. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  23. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  24. * GNU General Public License for more details.
  25. */
  26. /*
  27. * TODO:
  28. * - add timeout on polled transfers
  29. * - add generic DMA framework support
  30. */
  31. #include <linux/init.h>
  32. #include <linux/module.h>
  33. #include <linux/device.h>
  34. #include <linux/ioport.h>
  35. #include <linux/errno.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/spi/spi.h>
  38. #include <linux/workqueue.h>
  39. #include <linux/delay.h>
  40. #include <linux/clk.h>
  41. #include <linux/err.h>
  42. #include <linux/amba/bus.h>
  43. #include <linux/amba/pl022.h>
  44. #include <linux/io.h>
  45. /*
  46. * This macro is used to define some register default values.
  47. * reg is masked with mask, then OR'ed with an (again masked)
  48. * val shifted sb steps to the left.
  49. */
  50. #define SSP_WRITE_BITS(reg, val, mask, sb) \
  51. ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
  52. /*
  53. * This macro is also used to define some default values.
  54. * It will just shift val by sb steps to the left and mask
  55. * the result with mask.
  56. */
  57. #define GEN_MASK_BITS(val, mask, sb) \
  58. (((val)<<(sb)) & (mask))
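/*
 * Editor's note (illustrative worked example, not part of the original
 * driver): using SSP_CR0_MASK_SCR (0xFF << 8), defined further down in
 * this file:
 *
 *   GEN_MASK_BITS(0x2, SSP_CR0_MASK_SCR, 8)
 *     == ((0x2 << 8) & 0xFF00) == 0x0200
 *
 *   u32 reg = 0x00C7;
 *   SSP_WRITE_BITS(reg, 0x2, SSP_CR0_MASK_SCR, 8);
 *     -> reg == (0x00C7 & ~0xFF00) | 0x0200 == 0x02C7
 *
 * i.e. only the masked SCR field is replaced while all other bits of reg
 * are preserved. This is how chip->cr0/cr1/dmacr are composed later in
 * pl022_setup().
 */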
  59. #define DRIVE_TX 0
  60. #define DO_NOT_DRIVE_TX 1
  61. #define DO_NOT_QUEUE_DMA 0
  62. #define QUEUE_DMA 1
  63. #define RX_TRANSFER 1
  64. #define TX_TRANSFER 2
  65. /*
  66. * Macros to access SSP Registers with their offsets
  67. */
  68. #define SSP_CR0(r) (r + 0x000)
  69. #define SSP_CR1(r) (r + 0x004)
  70. #define SSP_DR(r) (r + 0x008)
  71. #define SSP_SR(r) (r + 0x00C)
  72. #define SSP_CPSR(r) (r + 0x010)
  73. #define SSP_IMSC(r) (r + 0x014)
  74. #define SSP_RIS(r) (r + 0x018)
  75. #define SSP_MIS(r) (r + 0x01C)
  76. #define SSP_ICR(r) (r + 0x020)
  77. #define SSP_DMACR(r) (r + 0x024)
  78. #define SSP_ITCR(r) (r + 0x080)
  79. #define SSP_ITIP(r) (r + 0x084)
  80. #define SSP_ITOP(r) (r + 0x088)
  81. #define SSP_TDR(r) (r + 0x08C)
  82. #define SSP_PID0(r) (r + 0xFE0)
  83. #define SSP_PID1(r) (r + 0xFE4)
  84. #define SSP_PID2(r) (r + 0xFE8)
  85. #define SSP_PID3(r) (r + 0xFEC)
  86. #define SSP_CID0(r) (r + 0xFF0)
  87. #define SSP_CID1(r) (r + 0xFF4)
  88. #define SSP_CID2(r) (r + 0xFF8)
  89. #define SSP_CID3(r) (r + 0xFFC)
  90. /*
  91. * SSP Control Register 0 - SSP_CR0
  92. */
  93. #define SSP_CR0_MASK_DSS (0x1FUL << 0)
  94. #define SSP_CR0_MASK_HALFDUP (0x1UL << 5)
  95. #define SSP_CR0_MASK_SPO (0x1UL << 6)
  96. #define SSP_CR0_MASK_SPH (0x1UL << 7)
  97. #define SSP_CR0_MASK_SCR (0xFFUL << 8)
  98. #define SSP_CR0_MASK_CSS (0x1FUL << 16)
  99. #define SSP_CR0_MASK_FRF (0x3UL << 21)
  100. /*
  101. * SSP Control Register 1 - SSP_CR1
  102. */
  103. #define SSP_CR1_MASK_LBM (0x1UL << 0)
  104. #define SSP_CR1_MASK_SSE (0x1UL << 1)
  105. #define SSP_CR1_MASK_MS (0x1UL << 2)
  106. #define SSP_CR1_MASK_SOD (0x1UL << 3)
  107. #define SSP_CR1_MASK_RENDN (0x1UL << 4)
  108. #define SSP_CR1_MASK_TENDN (0x1UL << 5)
  109. #define SSP_CR1_MASK_MWAIT (0x1UL << 6)
  110. #define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7)
  111. #define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10)
  112. /*
  113. * SSP Data Register - SSP_DR
  114. */
  115. #define SSP_DR_MASK_DATA 0xFFFFFFFF
  116. /*
  117. * SSP Status Register - SSP_SR
  118. */
  119. #define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
  120. #define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
  121. #define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
  122. #define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
  123. #define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
  124. /*
  125. * SSP Clock Prescale Register - SSP_CPSR
  126. */
  127. #define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
  128. /*
  129. * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
  130. */
  131. #define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
  132. #define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */
  133. #define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */
  134. #define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */
  135. /*
  136. * SSP Raw Interrupt Status Register - SSP_RIS
  137. */
  138. /* Receive Overrun Raw Interrupt status */
  139. #define SSP_RIS_MASK_RORRIS (0x1UL << 0)
  140. /* Receive Timeout Raw Interrupt status */
  141. #define SSP_RIS_MASK_RTRIS (0x1UL << 1)
  142. /* Receive FIFO Raw Interrupt status */
  143. #define SSP_RIS_MASK_RXRIS (0x1UL << 2)
  144. /* Transmit FIFO Raw Interrupt status */
  145. #define SSP_RIS_MASK_TXRIS (0x1UL << 3)
  146. /*
  147. * SSP Masked Interrupt Status Register - SSP_MIS
  148. */
  149. /* Receive Overrun Masked Interrupt status */
  150. #define SSP_MIS_MASK_RORMIS (0x1UL << 0)
  151. /* Receive Timeout Masked Interrupt status */
  152. #define SSP_MIS_MASK_RTMIS (0x1UL << 1)
  153. /* Receive FIFO Masked Interrupt status */
  154. #define SSP_MIS_MASK_RXMIS (0x1UL << 2)
  155. /* Transmit FIFO Masked Interrupt status */
  156. #define SSP_MIS_MASK_TXMIS (0x1UL << 3)
  157. /*
  158. * SSP Interrupt Clear Register - SSP_ICR
  159. */
  160. /* Receive Overrun Raw Clear Interrupt bit */
  161. #define SSP_ICR_MASK_RORIC (0x1UL << 0)
  162. /* Receive Timeout Clear Interrupt bit */
  163. #define SSP_ICR_MASK_RTIC (0x1UL << 1)
  164. /*
  165. * SSP DMA Control Register - SSP_DMACR
  166. */
  167. /* Receive DMA Enable bit */
  168. #define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
  169. /* Transmit DMA Enable bit */
  170. #define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
  171. /*
  172. * SSP Integration Test control Register - SSP_ITCR
  173. */
  174. #define SSP_ITCR_MASK_ITEN (0x1UL << 0)
  175. #define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
  176. /*
  177. * SSP Integration Test Input Register - SSP_ITIP
  178. */
  179. #define ITIP_MASK_SSPRXD (0x1UL << 0)
  180. #define ITIP_MASK_SSPFSSIN (0x1UL << 1)
  181. #define ITIP_MASK_SSPCLKIN (0x1UL << 2)
  182. #define ITIP_MASK_RXDMAC (0x1UL << 3)
  183. #define ITIP_MASK_TXDMAC (0x1UL << 4)
  184. #define ITIP_MASK_SSPTXDIN (0x1UL << 5)
  185. /*
  186. * SSP Integration Test output Register - SSP_ITOP
  187. */
  188. #define ITOP_MASK_SSPTXD (0x1UL << 0)
  189. #define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
  190. #define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
  191. #define ITOP_MASK_SSPOEn (0x1UL << 3)
  192. #define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
  193. #define ITOP_MASK_RORINTR (0x1UL << 5)
  194. #define ITOP_MASK_RTINTR (0x1UL << 6)
  195. #define ITOP_MASK_RXINTR (0x1UL << 7)
  196. #define ITOP_MASK_TXINTR (0x1UL << 8)
  197. #define ITOP_MASK_INTR (0x1UL << 9)
  198. #define ITOP_MASK_RXDMABREQ (0x1UL << 10)
  199. #define ITOP_MASK_RXDMASREQ (0x1UL << 11)
  200. #define ITOP_MASK_TXDMABREQ (0x1UL << 12)
  201. #define ITOP_MASK_TXDMASREQ (0x1UL << 13)
  202. /*
  203. * SSP Test Data Register - SSP_TDR
  204. */
  205. #define TDR_MASK_TESTDATA (0xFFFFFFFF)
  206. /*
  207. * Message State
  208. * we use the spi_message.state (void *) pointer to
  209. * hold a single state value, that's why all this
  210. * (void *) casting is done here.
  211. */
  212. #define STATE_START ((void *) 0)
  213. #define STATE_RUNNING ((void *) 1)
  214. #define STATE_DONE ((void *) 2)
  215. #define STATE_ERROR ((void *) -1)
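/*
 * Editor's note (summary of how these states are used elsewhere in this
 * file): a message gets STATE_START when queued (pl022_transfer) and when
 * dequeued (pump_messages); the transfer pump moves it to STATE_RUNNING,
 * and it ends up as STATE_DONE or STATE_ERROR before giveback() completes
 * it and reports msg->status.
 */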
  216. /*
  217. * Queue State
  218. */
  219. #define QUEUE_RUNNING (0)
  220. #define QUEUE_STOPPED (1)
  221. /*
  222. * SSP State - Whether Enabled or Disabled
  223. */
  224. #define SSP_DISABLED (0)
  225. #define SSP_ENABLED (1)
  226. /*
  227. * SSP DMA State - Whether DMA Enabled or Disabled
  228. */
  229. #define SSP_DMA_DISABLED (0)
  230. #define SSP_DMA_ENABLED (1)
  231. /*
  232. * SSP Clock Defaults
  233. */
  234. #define NMDK_SSP_DEFAULT_CLKRATE 0x2
  235. #define NMDK_SSP_DEFAULT_PRESCALE 0x40
  236. /*
  237. * SSP Clock Parameter ranges
  238. */
  239. #define CPSDVR_MIN 0x02
  240. #define CPSDVR_MAX 0xFE
  241. #define SCR_MIN 0x00
  242. #define SCR_MAX 0xFF
  243. /*
  244. * SSP Interrupt related Macros
  245. */
  246. #define DEFAULT_SSP_REG_IMSC 0x0UL
  247. #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
  248. #define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)
  249. #define CLEAR_ALL_INTERRUPTS 0x3
  250. /*
  251. * The type of reading going on on this chip
  252. */
  253. enum ssp_reading {
  254. READING_NULL,
  255. READING_U8,
  256. READING_U16,
  257. READING_U32
  258. };
  259. /**
  260. * The type of writing going on on this chip
  261. */
  262. enum ssp_writing {
  263. WRITING_NULL,
  264. WRITING_U8,
  265. WRITING_U16,
  266. WRITING_U32
  267. };
  268. /**
  269. * struct vendor_data - vendor-specific config parameters
  270. * for PL022 derivatives
  271. * @fifodepth: depth of FIFOs (both)
  272. * @max_bpw: maximum number of bits per word
  273. * @unidir: supports unidirectional transfers
  274. */
  275. struct vendor_data {
  276. int fifodepth;
  277. int max_bpw;
  278. bool unidir;
  279. };
  280. /**
  281. * struct pl022 - This is the private SSP driver data structure
  282. * @adev: AMBA device model hookup
  283. * @phybase: The physical memory where the SSP device resides
  284. * @virtbase: The virtual memory where the SSP is mapped
  285. * @master: SPI framework hookup
  286. * @master_info: controller-specific data from machine setup
  287. * @vendor: vendor-specific parameters for this PL022 variant
  288. * @pump_messages: Work struct for scheduling work to the workqueue
  289. * @queue_lock: spinlock to synchronise access to driver data
  290. * @workqueue: a workqueue on which any spi_message request is queued
  291. * @busy: workqueue is busy
  292. * @run: workqueue is running
  293. * @pump_transfers: Tasklet used in Interrupt Transfer mode
  294. * @cur_msg: Pointer to current spi_message being processed
  295. * @cur_transfer: Pointer to current spi_transfer
  296. * @cur_chip: pointer to the current client's chip (assigned from controller_state)
  297. * @tx: current position in TX buffer to be read
  298. * @tx_end: end position in TX buffer to be read
  299. * @rx: current position in RX buffer to be written
  300. * @rx_end: end position in RX buffer to be written
  301. * @read: the type of read currently going on
  302. * @write: the type of write currently going on
  303. */
  304. struct pl022 {
  305. struct amba_device *adev;
  306. struct vendor_data *vendor;
  307. resource_size_t phybase;
  308. void __iomem *virtbase;
  309. struct clk *clk;
  310. struct spi_master *master;
  311. struct pl022_ssp_controller *master_info;
  312. /* Driver message queue */
  313. struct workqueue_struct *workqueue;
  314. struct work_struct pump_messages;
  315. spinlock_t queue_lock;
  316. struct list_head queue;
  317. int busy;
  318. int run;
  319. /* Message transfer pump */
  320. struct tasklet_struct pump_transfers;
  321. struct spi_message *cur_msg;
  322. struct spi_transfer *cur_transfer;
  323. struct chip_data *cur_chip;
  324. void *tx;
  325. void *tx_end;
  326. void *rx;
  327. void *rx_end;
  328. enum ssp_reading read;
  329. enum ssp_writing write;
  330. u32 exp_fifo_level;
  331. };
  332. /**
  333. * struct chip_data - To maintain runtime state of SSP for each client chip
  334. * @cr0: Value of control register CR0 of SSP
  335. * @cr1: Value of control register CR1 of SSP
  336. * @dmacr: Value of DMA control Register of SSP
  337. * @cpsr: Value of Clock prescale register
  338. * @n_bytes: how many bytes (a power of 2) are required for the client's data width
  339. * @enable_dma: Whether to enable DMA or not
  340. * @write: function ptr to be used to write when doing xfer for this chip
  341. * @read: function ptr to be used to read when doing xfer for this chip
  342. * @cs_control: chip select callback provided by chip
  343. * @xfer_type: polling/interrupt/DMA
  344. *
  345. * Runtime state of the SSP controller, maintained per chip.
  346. * It is set up according to the current message being served.
  347. */
  348. struct chip_data {
  349. u16 cr0;
  350. u16 cr1;
  351. u16 dmacr;
  352. u16 cpsr;
  353. u8 n_bytes;
  354. u8 enable_dma:1;
  355. enum ssp_reading read;
  356. enum ssp_writing write;
  357. void (*cs_control) (u32 command);
  358. int xfer_type;
  359. };
  360. /**
  361. * null_cs_control - Dummy chip select function
  362. * @command: select/deselect the chip
  363. *
  364. * If no chip select function is provided by the client, this is used
  365. * as a dummy chip select.
  366. */
  367. static void null_cs_control(u32 command)
  368. {
  369. pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
  370. }
  371. /**
  372. * giveback - current spi_message is over, schedule next message and call
  373. * callback of this message. Assumes that caller already
  374. * set message->status; dma and pio irqs are blocked
  375. * @pl022: SSP driver private data structure
  376. */
  377. static void giveback(struct pl022 *pl022)
  378. {
  379. struct spi_transfer *last_transfer;
  380. unsigned long flags;
  381. struct spi_message *msg;
  382. void (*curr_cs_control) (u32 command);
  383. /*
  384. * This local reference to the chip select function
  385. * is needed because we set cur_chip to NULL
  386. * as a step toward terminating the message.
  387. */
  388. curr_cs_control = pl022->cur_chip->cs_control;
  389. spin_lock_irqsave(&pl022->queue_lock, flags);
  390. msg = pl022->cur_msg;
  391. pl022->cur_msg = NULL;
  392. pl022->cur_transfer = NULL;
  393. pl022->cur_chip = NULL;
  394. queue_work(pl022->workqueue, &pl022->pump_messages);
  395. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  396. last_transfer = list_entry(msg->transfers.prev,
  397. struct spi_transfer,
  398. transfer_list);
  399. /* Delay if requested before any change in chip select */
  400. if (last_transfer->delay_usecs)
  401. /*
  402. * FIXME: This runs in interrupt context.
  403. * Is this really smart?
  404. */
  405. udelay(last_transfer->delay_usecs);
  406. /*
  407. * Drop chip select UNLESS cs_change is true or we are returning
  408. * a message with an error, or next message is for another chip
  409. */
  410. if (!last_transfer->cs_change)
  411. curr_cs_control(SSP_CHIP_DESELECT);
  412. else {
  413. struct spi_message *next_msg;
  414. /* Holding of cs was hinted, but we need to make sure
  415. * the next message is for the same chip. Don't waste
  416. * time with the following tests unless this was hinted.
  417. *
  418. * We cannot postpone this until pump_messages, because
  419. * after calling msg->complete (below) the driver that
  420. * sent the current message could be unloaded, which
  421. * could invalidate the cs_control() callback...
  422. */
  423. /* get a pointer to the next message, if any */
  424. spin_lock_irqsave(&pl022->queue_lock, flags);
  425. if (list_empty(&pl022->queue))
  426. next_msg = NULL;
  427. else
  428. next_msg = list_entry(pl022->queue.next,
  429. struct spi_message, queue);
  430. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  431. /* see if the next and current messages point
  432. * to the same chip
  433. */
  434. if (next_msg && next_msg->spi != msg->spi)
  435. next_msg = NULL;
  436. if (!next_msg || msg->state == STATE_ERROR)
  437. curr_cs_control(SSP_CHIP_DESELECT);
  438. }
  439. msg->state = NULL;
  440. if (msg->complete)
  441. msg->complete(msg->context);
  442. /* This message is completed, so let's turn off the clock! */
  443. clk_disable(pl022->clk);
  444. }
  445. /**
  446. * flush - flush the FIFO to reach a clean state
  447. * @pl022: SSP driver private data structure
  448. */
  449. static int flush(struct pl022 *pl022)
  450. {
  451. unsigned long limit = loops_per_jiffy << 1;
  452. dev_dbg(&pl022->adev->dev, "flush\n");
  453. do {
  454. while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
  455. readw(SSP_DR(pl022->virtbase));
  456. } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
  457. pl022->exp_fifo_level = 0;
  458. return limit;
  459. }
  460. /**
  461. * restore_state - Load configuration of current chip
  462. * @pl022: SSP driver private data structure
  463. */
  464. static void restore_state(struct pl022 *pl022)
  465. {
  466. struct chip_data *chip = pl022->cur_chip;
  467. writew(chip->cr0, SSP_CR0(pl022->virtbase));
  468. writew(chip->cr1, SSP_CR1(pl022->virtbase));
  469. writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
  470. writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
  471. writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  472. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  473. }
  474. /**
  475. * load_ssp_default_config - Load default configuration for SSP
  476. * @pl022: SSP driver private data structure
  477. */
  478. /*
  479. * Default SSP Register Values
  480. */
  481. #define DEFAULT_SSP_REG_CR0 ( \
  482. GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
  483. GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
  484. GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
  485. GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
  486. GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
  487. GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \
  488. GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
  489. )
  490. #define DEFAULT_SSP_REG_CR1 ( \
  491. GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
  492. GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
  493. GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
  494. GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
  495. GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \
  496. GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \
  497. GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\
  498. GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \
  499. GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \
  500. )
  501. #define DEFAULT_SSP_REG_CPSR ( \
  502. GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
  503. )
  504. #define DEFAULT_SSP_REG_DMACR (\
  505. GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
  506. GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
  507. )
  508. static void load_ssp_default_config(struct pl022 *pl022)
  509. {
  510. writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
  511. writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
  512. writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
  513. writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
  514. writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  515. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  516. }
  517. /**
  518. * This will write to TX and read from RX according to the parameters
  519. * set in pl022.
  520. */
  521. static void readwriter(struct pl022 *pl022)
  522. {
  523. /*
  524. * The FIFO depth differs between PrimeCell variants.
  525. * Filling the FIFO too far is believed to cause errors in 8-bit
  526. * wide transfers, at least on ARM variants with only an 8-word
  527. * FIFO (8 x 8 = 64 bits in the FIFO).
  528. *
  529. * To prevent this issue, the TX FIFO is only filled to the
  530. * unused RX FIFO fill length, regardless of what the TX
  531. * FIFO status flag indicates.
  532. */
  533. dev_dbg(&pl022->adev->dev,
  534. "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
  535. __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
  536. /* Read as much as you can */
  537. while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
  538. && (pl022->rx < pl022->rx_end)) {
  539. switch (pl022->read) {
  540. case READING_NULL:
  541. readw(SSP_DR(pl022->virtbase));
  542. break;
  543. case READING_U8:
  544. *(u8 *) (pl022->rx) =
  545. readw(SSP_DR(pl022->virtbase)) & 0xFFU;
  546. break;
  547. case READING_U16:
  548. *(u16 *) (pl022->rx) =
  549. (u16) readw(SSP_DR(pl022->virtbase));
  550. break;
  551. case READING_U32:
  552. *(u32 *) (pl022->rx) =
  553. readl(SSP_DR(pl022->virtbase));
  554. break;
  555. }
  556. pl022->rx += (pl022->cur_chip->n_bytes);
  557. pl022->exp_fifo_level--;
  558. }
  559. /*
  560. * Write as much as possible up to the RX FIFO size
  561. */
  562. while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
  563. && (pl022->tx < pl022->tx_end)) {
  564. switch (pl022->write) {
  565. case WRITING_NULL:
  566. writew(0x0, SSP_DR(pl022->virtbase));
  567. break;
  568. case WRITING_U8:
  569. writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
  570. break;
  571. case WRITING_U16:
  572. writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
  573. break;
  574. case WRITING_U32:
  575. writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
  576. break;
  577. }
  578. pl022->tx += (pl022->cur_chip->n_bytes);
  579. pl022->exp_fifo_level++;
  580. /*
  581. * This inner reader takes care of things appearing in the RX
  582. * FIFO as we're transmitting. This will happen a lot since the
  583. * clock starts running when you put things into the TX FIFO,
  584. * and then things are continuously clocked into the RX FIFO.
  585. */
  586. while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
  587. && (pl022->rx < pl022->rx_end)) {
  588. switch (pl022->read) {
  589. case READING_NULL:
  590. readw(SSP_DR(pl022->virtbase));
  591. break;
  592. case READING_U8:
  593. *(u8 *) (pl022->rx) =
  594. readw(SSP_DR(pl022->virtbase)) & 0xFFU;
  595. break;
  596. case READING_U16:
  597. *(u16 *) (pl022->rx) =
  598. (u16) readw(SSP_DR(pl022->virtbase));
  599. break;
  600. case READING_U32:
  601. *(u32 *) (pl022->rx) =
  602. readl(SSP_DR(pl022->virtbase));
  603. break;
  604. }
  605. pl022->rx += (pl022->cur_chip->n_bytes);
  606. pl022->exp_fifo_level--;
  607. }
  608. }
  609. /*
  610. * When we exit here the TX FIFO should be full and the RX FIFO
  611. * should be empty
  612. */
  613. }
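/*
 * Editor's note (descriptive, not part of the original driver):
 * pl022->exp_fifo_level tracks the number of frames that have been written
 * to the TX FIFO but not yet drained from the RX FIFO. Bounding it by
 * vendor->fifodepth in the write loop above is what keeps the RX FIFO from
 * overflowing, independently of what the TX status flags would allow.
 */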
  614. /**
  615. * next_transfer - Move to the Next transfer in the current spi message
  616. * @pl022: SSP driver private data structure
  617. *
  618. * This function moves through the linked list of spi transfers in the
  619. * current spi message and returns the state of the current spi
  620. * message, i.e. whether its last transfer is done (STATE_DONE) or the
  621. * next transfer is ready (STATE_RUNNING).
  622. */
  623. static void *next_transfer(struct pl022 *pl022)
  624. {
  625. struct spi_message *msg = pl022->cur_msg;
  626. struct spi_transfer *trans = pl022->cur_transfer;
  627. /* Move to next transfer */
  628. if (trans->transfer_list.next != &msg->transfers) {
  629. pl022->cur_transfer =
  630. list_entry(trans->transfer_list.next,
  631. struct spi_transfer, transfer_list);
  632. return STATE_RUNNING;
  633. }
  634. return STATE_DONE;
  635. }
  636. /**
  637. * pl022_interrupt_handler - Interrupt handler for SSP controller
  638. *
  639. * This function handles interrupts generated for an interrupt based transfer.
  640. * If a receive overrun (ROR) interrupt occurs, we disable the SSP, flag the
  641. * current message's state as STATE_ERROR and schedule the tasklet
  642. * pump_transfers, which will do the postprocessing of the current message by
  643. * calling giveback(). Otherwise it reads data from the RX FIFO until it is
  644. * empty and writes data into the TX FIFO as long as it is not full. If we
  645. * complete the transfer we move to the next transfer and schedule the tasklet.
  646. */
  647. static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
  648. {
  649. struct pl022 *pl022 = dev_id;
  650. struct spi_message *msg = pl022->cur_msg;
  651. u16 irq_status = 0;
  652. u16 flag = 0;
  653. if (unlikely(!msg)) {
  654. dev_err(&pl022->adev->dev,
  655. "bad message state in interrupt handler");
  656. /* Never fail */
  657. return IRQ_HANDLED;
  658. }
  659. /* Read the Interrupt Status Register */
  660. irq_status = readw(SSP_MIS(pl022->virtbase));
  661. if (unlikely(!irq_status))
  662. return IRQ_NONE;
  663. /* This handles the error code interrupts */
  664. if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
  665. /*
  666. * Overrun interrupt - bail out since our Data has been
  667. * corrupted
  668. */
  669. dev_err(&pl022->adev->dev,
  670. "FIFO overrun\n");
  671. if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
  672. dev_err(&pl022->adev->dev,
  673. "RXFIFO is full\n");
  674. if (!(readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF))
  675. dev_err(&pl022->adev->dev,
  676. "TXFIFO is full\n");
  677. /*
  678. * Disable and clear interrupts, disable SSP,
  679. * mark message with bad status so it can be
  680. * retried.
  681. */
  682. writew(DISABLE_ALL_INTERRUPTS,
  683. SSP_IMSC(pl022->virtbase));
  684. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  685. writew((readw(SSP_CR1(pl022->virtbase)) &
  686. (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
  687. msg->state = STATE_ERROR;
  688. /* Schedule message queue handler */
  689. tasklet_schedule(&pl022->pump_transfers);
  690. return IRQ_HANDLED;
  691. }
  692. readwriter(pl022);
  693. if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
  694. flag = 1;
  695. /* Disable Transmit interrupt */
  696. writew(readw(SSP_IMSC(pl022->virtbase)) &
  697. (~SSP_IMSC_MASK_TXIM),
  698. SSP_IMSC(pl022->virtbase));
  699. }
  700. /*
  701. * Since all transactions must write as much as shall be read,
  702. * we can conclude the entire transaction once RX is complete.
  703. * At this point, all TX will always be finished.
  704. */
  705. if (pl022->rx >= pl022->rx_end) {
  706. writew(DISABLE_ALL_INTERRUPTS,
  707. SSP_IMSC(pl022->virtbase));
  708. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  709. if (unlikely(pl022->rx > pl022->rx_end)) {
  710. dev_warn(&pl022->adev->dev, "read %u surplus "
  711. "bytes (did you request an odd "
  712. "number of bytes on a 16bit bus?)\n",
  713. (u32) (pl022->rx - pl022->rx_end));
  714. }
  715. /* Update total bytes transferred */
  716. msg->actual_length += pl022->cur_transfer->len;
  717. if (pl022->cur_transfer->cs_change)
  718. pl022->cur_chip->
  719. cs_control(SSP_CHIP_DESELECT);
  720. /* Move to next transfer */
  721. msg->state = next_transfer(pl022);
  722. tasklet_schedule(&pl022->pump_transfers);
  723. return IRQ_HANDLED;
  724. }
  725. return IRQ_HANDLED;
  726. }
  727. /**
  728. * This sets up the memory pointers for the next transfer to
  729. * send out on the SPI bus.
  730. */
  731. static int set_up_next_transfer(struct pl022 *pl022,
  732. struct spi_transfer *transfer)
  733. {
  734. int residue;
  735. /* Sanity check the message for this bus width */
  736. residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
  737. if (unlikely(residue != 0)) {
  738. dev_err(&pl022->adev->dev,
  739. "message of %u bytes to transmit but the current "
  740. "chip bus has a data width of %u bytes!\n",
  741. pl022->cur_transfer->len,
  742. pl022->cur_chip->n_bytes);
  743. dev_err(&pl022->adev->dev, "skipping this message\n");
  744. return -EIO;
  745. }
  746. pl022->tx = (void *)transfer->tx_buf;
  747. pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
  748. pl022->rx = (void *)transfer->rx_buf;
  749. pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
  750. pl022->write =
  751. pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
  752. pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
  753. return 0;
  754. }
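/*
 * Editor's note (illustrative example of the sanity check above): a chip
 * configured for 9..16 bit words has n_bytes = 2, so a spi_transfer with
 * len = 7 gives residue = 7 % 2 = 1 and the message is rejected with -EIO
 * rather than silently truncating the odd trailing byte.
 */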
  755. /**
  756. * pump_transfers - Tasklet function which schedules next interrupt transfer
  757. * when running in interrupt transfer mode.
  758. * @data: SSP driver private data structure
  759. *
  760. */
  761. static void pump_transfers(unsigned long data)
  762. {
  763. struct pl022 *pl022 = (struct pl022 *) data;
  764. struct spi_message *message = NULL;
  765. struct spi_transfer *transfer = NULL;
  766. struct spi_transfer *previous = NULL;
  767. /* Get current state information */
  768. message = pl022->cur_msg;
  769. transfer = pl022->cur_transfer;
  770. /* Handle for abort */
  771. if (message->state == STATE_ERROR) {
  772. message->status = -EIO;
  773. giveback(pl022);
  774. return;
  775. }
  776. /* Handle end of message */
  777. if (message->state == STATE_DONE) {
  778. message->status = 0;
  779. giveback(pl022);
  780. return;
  781. }
  782. /* Delay if requested at end of transfer before CS change */
  783. if (message->state == STATE_RUNNING) {
  784. previous = list_entry(transfer->transfer_list.prev,
  785. struct spi_transfer,
  786. transfer_list);
  787. if (previous->delay_usecs)
  788. /*
  789. * FIXME: This runs in interrupt context.
  790. * Is this really smart?
  791. */
  792. udelay(previous->delay_usecs);
  793. /* Drop chip select only if cs_change is requested */
  794. if (previous->cs_change)
  795. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  796. } else {
  797. /* STATE_START */
  798. message->state = STATE_RUNNING;
  799. }
  800. if (set_up_next_transfer(pl022, transfer)) {
  801. message->state = STATE_ERROR;
  802. message->status = -EIO;
  803. giveback(pl022);
  804. return;
  805. }
  806. /* Flush the FIFOs and let's go! */
  807. flush(pl022);
  808. writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  809. }
  810. /**
  811. * NOT IMPLEMENTED
  812. * configure_dma - It configures the DMA pipes for DMA transfers
  813. * @data: SSP driver's private data structure
  814. *
  815. */
  816. static int configure_dma(void *data)
  817. {
  818. struct pl022 *pl022 = data;
  819. dev_dbg(&pl022->adev->dev, "configure DMA\n");
  820. return -ENOTSUPP;
  821. }
  822. /**
  823. * do_dma_transfer - It handles transfers of the current message
  824. * if it is DMA xfer.
  825. * NOT FULLY IMPLEMENTED
  826. * @data: SSP driver's private data structure
  827. */
  828. static void do_dma_transfer(void *data)
  829. {
  830. struct pl022 *pl022 = data;
  831. if (configure_dma(data)) {
  832. dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n");
  833. goto err_config_dma;
  834. }
  835. /* TODO: Implement DMA setup of pipes here */
  836. /* Enable target chip, set up transfer */
  837. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  838. if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
  839. /* Error path */
  840. pl022->cur_msg->state = STATE_ERROR;
  841. pl022->cur_msg->status = -EIO;
  842. giveback(pl022);
  843. return;
  844. }
  845. /* Enable SSP */
  846. writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
  847. SSP_CR1(pl022->virtbase));
  848. /* TODO: Enable the DMA transfer here */
  849. return;
  850. err_config_dma:
  851. pl022->cur_msg->state = STATE_ERROR;
  852. pl022->cur_msg->status = -EIO;
  853. giveback(pl022);
  854. return;
  855. }
  856. static void do_interrupt_transfer(void *data)
  857. {
  858. struct pl022 *pl022 = data;
  859. /* Enable target chip */
  860. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  861. if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
  862. /* Error path */
  863. pl022->cur_msg->state = STATE_ERROR;
  864. pl022->cur_msg->status = -EIO;
  865. giveback(pl022);
  866. return;
  867. }
  868. /* Enable SSP, turn on interrupts */
  869. writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
  870. SSP_CR1(pl022->virtbase));
  871. writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  872. }
  873. static void do_polling_transfer(void *data)
  874. {
  875. struct pl022 *pl022 = data;
  876. struct spi_message *message = NULL;
  877. struct spi_transfer *transfer = NULL;
  878. struct spi_transfer *previous = NULL;
  879. struct chip_data *chip;
  880. chip = pl022->cur_chip;
  881. message = pl022->cur_msg;
  882. while (message->state != STATE_DONE) {
  883. /* Handle for abort */
  884. if (message->state == STATE_ERROR)
  885. break;
  886. transfer = pl022->cur_transfer;
  887. /* Delay if requested at end of transfer */
  888. if (message->state == STATE_RUNNING) {
  889. previous =
  890. list_entry(transfer->transfer_list.prev,
  891. struct spi_transfer, transfer_list);
  892. if (previous->delay_usecs)
  893. udelay(previous->delay_usecs);
  894. if (previous->cs_change)
  895. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  896. } else {
  897. /* STATE_START */
  898. message->state = STATE_RUNNING;
  899. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  900. }
  901. /* Configuration Changing Per Transfer */
  902. if (set_up_next_transfer(pl022, transfer)) {
  903. /* Error path */
  904. message->state = STATE_ERROR;
  905. break;
  906. }
  907. /* Flush FIFOs and enable SSP */
  908. flush(pl022);
  909. writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
  910. SSP_CR1(pl022->virtbase));
  911. dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... \n");
  912. /* FIXME: insert a timeout so we don't hang here indefinitely */
  913. while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
  914. readwriter(pl022);
  915. /* Update total bytes transferred */
  916. message->actual_length += pl022->cur_transfer->len;
  917. if (pl022->cur_transfer->cs_change)
  918. pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
  919. /* Move to next transfer */
  920. message->state = next_transfer(pl022);
  921. }
  922. /* Handle end of message */
  923. if (message->state == STATE_DONE)
  924. message->status = 0;
  925. else
  926. message->status = -EIO;
  927. giveback(pl022);
  928. return;
  929. }
  930. /**
  931. * pump_messages - Workqueue function which processes spi message queue
  932. * @data: pointer to private data of SSP driver
  933. *
  934. * This function checks if there is any spi message in the queue that
  935. * needs processing and delegates control to the appropriate function
  936. * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer()
  937. * based on the kind of transfer
  938. *
  939. */
  940. static void pump_messages(struct work_struct *work)
  941. {
  942. struct pl022 *pl022 =
  943. container_of(work, struct pl022, pump_messages);
  944. unsigned long flags;
  945. /* Lock queue and check for queue work */
  946. spin_lock_irqsave(&pl022->queue_lock, flags);
  947. if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
  948. pl022->busy = 0;
  949. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  950. return;
  951. }
  952. /* Make sure we are not already running a message */
  953. if (pl022->cur_msg) {
  954. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  955. return;
  956. }
  957. /* Extract head of queue */
  958. pl022->cur_msg =
  959. list_entry(pl022->queue.next, struct spi_message, queue);
  960. list_del_init(&pl022->cur_msg->queue);
  961. pl022->busy = 1;
  962. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  963. /* Initial message state */
  964. pl022->cur_msg->state = STATE_START;
  965. pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
  966. struct spi_transfer,
  967. transfer_list);
  968. /* Setup the SPI using the per chip configuration */
  969. pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
  970. /*
  971. * We enable the clock here, then the clock will be disabled when
  972. * giveback() is called in each method (poll/interrupt/DMA)
  973. */
  974. clk_enable(pl022->clk);
  975. restore_state(pl022);
  976. flush(pl022);
  977. if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
  978. do_polling_transfer(pl022);
  979. else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER)
  980. do_interrupt_transfer(pl022);
  981. else
  982. do_dma_transfer(pl022);
  983. }
  984. static int __init init_queue(struct pl022 *pl022)
  985. {
  986. INIT_LIST_HEAD(&pl022->queue);
  987. spin_lock_init(&pl022->queue_lock);
  988. pl022->run = QUEUE_STOPPED;
  989. pl022->busy = 0;
  990. tasklet_init(&pl022->pump_transfers,
  991. pump_transfers, (unsigned long)pl022);
  992. INIT_WORK(&pl022->pump_messages, pump_messages);
  993. pl022->workqueue = create_singlethread_workqueue(
  994. dev_name(pl022->master->dev.parent));
  995. if (pl022->workqueue == NULL)
  996. return -EBUSY;
  997. return 0;
  998. }
  999. static int start_queue(struct pl022 *pl022)
  1000. {
  1001. unsigned long flags;
  1002. spin_lock_irqsave(&pl022->queue_lock, flags);
  1003. if (pl022->run == QUEUE_RUNNING || pl022->busy) {
  1004. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1005. return -EBUSY;
  1006. }
  1007. pl022->run = QUEUE_RUNNING;
  1008. pl022->cur_msg = NULL;
  1009. pl022->cur_transfer = NULL;
  1010. pl022->cur_chip = NULL;
  1011. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1012. queue_work(pl022->workqueue, &pl022->pump_messages);
  1013. return 0;
  1014. }
  1015. static int stop_queue(struct pl022 *pl022)
  1016. {
  1017. unsigned long flags;
  1018. unsigned limit = 500;
  1019. int status = 0;
  1020. spin_lock_irqsave(&pl022->queue_lock, flags);
  1021. /* This is a bit lame, but is optimized for the common execution path.
  1022. * A wait_queue on the pl022->busy could be used, but then the common
  1023. * execution path (pump_messages) would be required to call wake_up or
  1024. * friends on every SPI message. Do this instead */
  1025. pl022->run = QUEUE_STOPPED;
  1026. while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
  1027. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1028. msleep(10);
  1029. spin_lock_irqsave(&pl022->queue_lock, flags);
  1030. }
  1031. if (!list_empty(&pl022->queue) || pl022->busy)
  1032. status = -EBUSY;
  1033. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1034. return status;
  1035. }
  1036. static int destroy_queue(struct pl022 *pl022)
  1037. {
  1038. int status;
  1039. status = stop_queue(pl022);
  1040. /* we are unloading the module or failing to load (only two calls
  1041. * to this routine), and neither call can handle a return value.
  1042. * However, destroy_workqueue calls flush_workqueue, and that will
  1043. * block until all work is done. If the reason that stop_queue
  1044. * timed out is that the work will never finish, then it does no
  1045. * good to call destroy_workqueue, so return anyway. */
  1046. if (status != 0)
  1047. return status;
  1048. destroy_workqueue(pl022->workqueue);
  1049. return 0;
  1050. }
  1051. static int verify_controller_parameters(struct pl022 *pl022,
  1052. struct pl022_config_chip *chip_info)
  1053. {
  1054. if ((chip_info->lbm != LOOPBACK_ENABLED)
  1055. && (chip_info->lbm != LOOPBACK_DISABLED)) {
  1056. dev_err(chip_info->dev,
  1057. "loopback Mode is configured incorrectly\n");
  1058. return -EINVAL;
  1059. }
  1060. if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
  1061. || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
  1062. dev_err(chip_info->dev,
  1063. "interface is configured incorrectly\n");
  1064. return -EINVAL;
  1065. }
  1066. if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
  1067. (!pl022->vendor->unidir)) {
  1068. dev_err(chip_info->dev,
  1069. "unidirectional mode not supported in this "
  1070. "hardware version\n");
  1071. return -EINVAL;
  1072. }
  1073. if ((chip_info->hierarchy != SSP_MASTER)
  1074. && (chip_info->hierarchy != SSP_SLAVE)) {
  1075. dev_err(chip_info->dev,
  1076. "hierarchy is configured incorrectly\n");
  1077. return -EINVAL;
  1078. }
  1079. if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN)
  1080. || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) {
  1081. dev_err(chip_info->dev,
  1082. "cpsdvsr is configured incorrectly\n");
  1083. return -EINVAL;
  1084. }
  1085. if ((chip_info->endian_rx != SSP_RX_MSB)
  1086. && (chip_info->endian_rx != SSP_RX_LSB)) {
  1087. dev_err(chip_info->dev,
  1088. "RX FIFO endianess is configured incorrectly\n");
  1089. return -EINVAL;
  1090. }
  1091. if ((chip_info->endian_tx != SSP_TX_MSB)
  1092. && (chip_info->endian_tx != SSP_TX_LSB)) {
  1093. dev_err(chip_info->dev,
  1094. "TX FIFO endianess is configured incorrectly\n");
  1095. return -EINVAL;
  1096. }
  1097. if ((chip_info->data_size < SSP_DATA_BITS_4)
  1098. || (chip_info->data_size > SSP_DATA_BITS_32)) {
  1099. dev_err(chip_info->dev,
  1100. "DATA Size is configured incorrectly\n");
  1101. return -EINVAL;
  1102. }
  1103. if ((chip_info->com_mode != INTERRUPT_TRANSFER)
  1104. && (chip_info->com_mode != DMA_TRANSFER)
  1105. && (chip_info->com_mode != POLLING_TRANSFER)) {
  1106. dev_err(chip_info->dev,
  1107. "Communication mode is configured incorrectly\n");
  1108. return -EINVAL;
  1109. }
  1110. if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
  1111. || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
  1112. dev_err(chip_info->dev,
  1113. "RX FIFO Trigger Level is configured incorrectly\n");
  1114. return -EINVAL;
  1115. }
  1116. if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
  1117. || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
  1118. dev_err(chip_info->dev,
  1119. "TX FIFO Trigger Level is configured incorrectly\n");
  1120. return -EINVAL;
  1121. }
  1122. if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
  1123. if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE)
  1124. && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) {
  1125. dev_err(chip_info->dev,
  1126. "Clock Phase is configured incorrectly\n");
  1127. return -EINVAL;
  1128. }
  1129. if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW)
  1130. && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) {
  1131. dev_err(chip_info->dev,
  1132. "Clock Polarity is configured incorrectly\n");
  1133. return -EINVAL;
  1134. }
  1135. }
  1136. if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
  1137. if ((chip_info->ctrl_len < SSP_BITS_4)
  1138. || (chip_info->ctrl_len > SSP_BITS_32)) {
  1139. dev_err(chip_info->dev,
  1140. "CTRL LEN is configured incorrectly\n");
  1141. return -EINVAL;
  1142. }
  1143. if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
  1144. && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
  1145. dev_err(chip_info->dev,
  1146. "Wait State is configured incorrectly\n");
  1147. return -EINVAL;
  1148. }
  1149. if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
  1150. && (chip_info->duplex !=
  1151. SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
  1152. dev_err(chip_info->dev,
  1153. "DUPLEX is configured incorrectly\n");
  1154. return -EINVAL;
  1155. }
  1156. }
  1157. if (chip_info->cs_control == NULL) {
  1158. dev_warn(chip_info->dev,
  1159. "Chip Select Function is NULL for this chip\n");
  1160. chip_info->cs_control = null_cs_control;
  1161. }
  1162. return 0;
  1163. }
  1164. /**
  1165. * pl022_transfer - transfer function registered to SPI master framework
  1166. * @spi: spi device which is requesting transfer
  1167. * @msg: spi message to be handled; it is queued to the driver's queue
  1168. *
  1169. * This function is registered to the SPI framework for this SPI master
  1170. * controller. It will queue the spi_message in the queue of driver if
  1171. * the queue is not stopped and return.
  1172. */
  1173. static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
  1174. {
  1175. struct pl022 *pl022 = spi_master_get_devdata(spi->master);
  1176. unsigned long flags;
  1177. spin_lock_irqsave(&pl022->queue_lock, flags);
  1178. if (pl022->run == QUEUE_STOPPED) {
  1179. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1180. return -ESHUTDOWN;
  1181. }
  1182. msg->actual_length = 0;
  1183. msg->status = -EINPROGRESS;
  1184. msg->state = STATE_START;
  1185. list_add_tail(&msg->queue, &pl022->queue);
  1186. if (pl022->run == QUEUE_RUNNING && !pl022->busy)
  1187. queue_work(pl022->workqueue, &pl022->pump_messages);
  1188. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1189. return 0;
  1190. }
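/*
 * Editor's note (worked example with an assumed 48 MHz SSPCLK, purely for
 * illustration): calculate_effective_freq() below picks CPSDVSR (an even
 * value in 2..254) and SCR (0..255) so that
 *
 *   effective rate = clk_get_rate(pl022->clk) / (CPSDVSR * (1 + SCR))
 *
 * With a 48 MHz clock the achievable range is roughly 738 Hz
 * (CPSDVSR = 254, SCR = 255) up to 24 MHz (CPSDVSR = 2, SCR = 0); a request
 * for 4 MHz could for instance be met exactly with CPSDVSR = 2 and SCR = 5,
 * since 48 MHz / (2 * 6) = 4 MHz.
 */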
  1191. static int calculate_effective_freq(struct pl022 *pl022,
  1192. int freq,
  1193. struct ssp_clock_params *clk_freq)
  1194. {
  1195. /* Let's calculate the frequency parameters */
  1196. u16 cpsdvsr = 2;
  1197. u16 scr = 0;
  1198. bool freq_found = false;
  1199. u32 rate;
  1200. u32 max_tclk;
  1201. u32 min_tclk;
  1202. rate = clk_get_rate(pl022->clk);
  1203. /* cpsdvsr = 2 & scr = 0 */
  1204. max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN)));
  1205. /* cpsdvsr = 254 & scr = 255 */
  1206. min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX)));
  1207. if ((freq <= max_tclk) && (freq >= min_tclk)) {
  1208. while (cpsdvsr <= CPSDVR_MAX && !freq_found) {
  1209. while (scr <= SCR_MAX && !freq_found) {
  1210. if ((rate /
  1211. (cpsdvsr * (1 + scr))) > freq)
  1212. scr += 1;
  1213. else {
  1214. /*
  1215. * This bool is made true when
  1216. * effective frequency >=
  1217. * target frequency is found
  1218. */
  1219. freq_found = true;
  1220. if ((rate /
  1221. (cpsdvsr * (1 + scr))) != freq) {
  1222. if (scr == SCR_MIN) {
  1223. cpsdvsr -= 2;
  1224. scr = SCR_MAX;
  1225. } else
  1226. scr -= 1;
  1227. }
  1228. }
  1229. }
  1230. if (!freq_found) {
  1231. cpsdvsr += 2;
  1232. scr = SCR_MIN;
  1233. }
  1234. }
  1235. if (cpsdvsr != 0) {
  1236. dev_dbg(&pl022->adev->dev,
  1237. "SSP Effective Frequency is %u\n",
  1238. (rate / (cpsdvsr * (1 + scr))));
  1239. clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF);
  1240. clk_freq->scr = (u8) (scr & 0xFF);
  1241. dev_dbg(&pl022->adev->dev,
  1242. "SSP cpsdvsr = %d, scr = %d\n",
  1243. clk_freq->cpsdvsr, clk_freq->scr);
  1244. }
  1245. } else {
  1246. dev_err(&pl022->adev->dev,
  1247. "controller data is incorrect: out of range frequency");
  1248. return -EINVAL;
  1249. }
  1250. return 0;
  1251. }
  1252. /**
  1253. * NOT IMPLEMENTED
  1254. * process_dma_info - Processes the DMA info provided by client drivers
  1255. * @chip_info: chip info provided by client device
  1256. * @chip: Runtime state maintained by the SSP controller for each spi device
  1257. *
  1258. * This function processes and stores DMA config provided by client driver
  1259. * into the runtime state maintained by the SSP controller driver
  1260. */
  1261. static int process_dma_info(struct pl022_config_chip *chip_info,
  1262. struct chip_data *chip)
  1263. {
  1264. dev_err(chip_info->dev,
  1265. "cannot process DMA info, DMA not implemented!\n");
  1266. return -ENOTSUPP;
  1267. }
  1268. /**
  1269. * pl022_setup - setup function registered to SPI master framework
  1270. * @spi: spi device which is requesting setup
  1271. *
  1272. * This function is registered to the SPI framework for this SPI master
  1273. * controller. If this is the first time setup is called for this device,
  1274. * this function will initialize the runtime state for this chip and save
  1275. * the same in the device structure. Else it will update the runtime info
  1276. * with the updated chip info. Nothing is really being written to the
  1277. * controller hardware here; that is not done until the actual transfers
  1278. * commence.
  1279. */
  1280. /* FIXME: JUST GUESSING the spi->mode bits understood by this driver */
  1281. #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
  1282. | SPI_LSB_FIRST | SPI_LOOP)
  1283. static int pl022_setup(struct spi_device *spi)
  1284. {
  1285. struct pl022_config_chip *chip_info;
  1286. struct chip_data *chip;
  1287. int status = 0;
  1288. struct pl022 *pl022 = spi_master_get_devdata(spi->master);
  1289. if (spi->mode & ~MODEBITS) {
  1290. dev_dbg(&spi->dev, "unsupported mode bits %x\n",
  1291. spi->mode & ~MODEBITS);
  1292. return -EINVAL;
  1293. }
  1294. if (!spi->max_speed_hz)
  1295. return -EINVAL;
  1296. /* Get controller_state if one is supplied */
  1297. chip = spi_get_ctldata(spi);
  1298. if (chip == NULL) {
  1299. chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
  1300. if (!chip) {
  1301. dev_err(&spi->dev,
  1302. "cannot allocate controller state\n");
  1303. return -ENOMEM;
  1304. }
  1305. dev_dbg(&spi->dev,
  1306. "allocated memory for controller's runtime state\n");
  1307. }
  1308. /* Get controller data if one is supplied */
  1309. chip_info = spi->controller_data;
  1310. if (chip_info == NULL) {
  1311. /* spi_board_info.controller_data is not supplied */
  1312. dev_dbg(&spi->dev,
  1313. "using default controller_data settings\n");
  1314. chip_info =
  1315. kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL);
  1316. if (!chip_info) {
  1317. dev_err(&spi->dev,
  1318. "cannot allocate controller data\n");
  1319. status = -ENOMEM;
  1320. goto err_first_setup;
  1321. }
  1322. dev_dbg(&spi->dev, "allocated memory for controller data\n");
  1323. /* Pointer back to the SPI device */
  1324. chip_info->dev = &spi->dev;
  1325. /*
  1326. * Set controller data default values:
  1327. * Polling is supported by default
  1328. */
  1329. chip_info->lbm = LOOPBACK_DISABLED;
  1330. chip_info->com_mode = POLLING_TRANSFER;
  1331. chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI;
  1332. chip_info->hierarchy = SSP_SLAVE;
  1333. chip_info->slave_tx_disable = DO_NOT_DRIVE_TX;
  1334. chip_info->endian_tx = SSP_TX_LSB;
  1335. chip_info->endian_rx = SSP_RX_LSB;
  1336. chip_info->data_size = SSP_DATA_BITS_12;
  1337. chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
  1338. chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
  1339. chip_info->clk_phase = SSP_CLK_SECOND_EDGE;
  1340. chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
  1341. chip_info->ctrl_len = SSP_BITS_8;
  1342. chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
  1343. chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX;
  1344. chip_info->cs_control = null_cs_control;
  1345. } else {
  1346. dev_dbg(&spi->dev,
  1347. "using user supplied controller_data settings\n");
  1348. }
	/*
	 * We can override with custom divisors, else we use the board
	 * frequency setting
	 */
	if ((0 == chip_info->clk_freq.cpsdvsr)
	    && (0 == chip_info->clk_freq.scr)) {
		status = calculate_effective_freq(pl022,
						  spi->max_speed_hz,
						  &chip_info->clk_freq);
		if (status < 0)
			goto err_config_params;
	} else {
		if ((chip_info->clk_freq.cpsdvsr % 2) != 0)
			chip_info->clk_freq.cpsdvsr =
				chip_info->clk_freq.cpsdvsr - 1;
	}
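	/*
	 * Worked example (for reference, assuming a 48 MHz SSPCLK): the
	 * PL022 bit rate is SSPCLK / (CPSDVSR * (1 + SCR)), and CPSDVSR
	 * must be an even value between 2 and 254, which is why an odd
	 * board-supplied cpsdvsr is rounded down above. With cpsdvsr = 2
	 * and scr = 11 the output clock becomes 48 MHz / (2 * 12) = 2 MHz.
	 */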
	status = verify_controller_parameters(pl022, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect");
		goto err_config_params;
	}

	/* Now set controller state based on controller data */
	chip->xfer_type = chip_info->com_mode;
	chip->cs_control = chip_info->cs_control;

	if (chip_info->data_size <= 8) {
		dev_dbg(&spi->dev, "1 <= n <= 8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (chip_info->data_size <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		if (pl022->vendor->max_bpw >= 32) {
			dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
			chip->n_bytes = 4;
			chip->read = READING_U32;
			chip->write = WRITING_U32;
		} else {
			dev_err(&spi->dev,
				"illegal data size for this controller!\n");
			dev_err(&spi->dev,
				"a standard pl022 can only handle "
				"1 <= n <= 16 bit words\n");
			/* Don't take the error path with status still 0 */
			status = -EINVAL;
			goto err_config_params;
		}
	}
	/* Now Initialize all register settings required for this chip */
	chip->cr0 = 0;
	chip->cr1 = 0;
	chip->dmacr = 0;
	chip->cpsr = 0;
	if ((chip_info->com_mode == DMA_TRANSFER)
	    && ((pl022->master_info)->enable_dma)) {
		chip->enable_dma = 1;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
		status = process_dma_info(chip_info, chip);
		if (status < 0)
			goto err_config_params;
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	} else {
		chip->enable_dma = 0;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	}

	chip->cpsr = chip_info->clk_freq.cpsdvsr;
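	/*
	 * Pack the complete SSP configuration into the chip's shadow cr0/cr1
	 * values; as noted in the setup kerneldoc above, nothing is written
	 * to the hardware here - that happens when a transfer for this chip
	 * is actually set up.
	 */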
	SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0);
	SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5);
	SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
	SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
	SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
	SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16);
	SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21);
	SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
	SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
	SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
	SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
	SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4);
	SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5);
	SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6);
	SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7);
	SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10);
	/* Save controller_state */
	spi_set_ctldata(spi, chip);
	return status;
 err_config_params:
 err_first_setup:
	kfree(chip);
	return status;
}
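/*
 * Illustrative sketch (not part of the original driver, kept out of the
 * build): how a board file might hand per-chip configuration to
 * pl022_setup() through spi_board_info.controller_data. The device name,
 * bus number and all field values below are made-up assumptions chosen
 * only to show the mechanism; a real board file would also include
 * <linux/spi/spi.h> and <linux/amba/pl022.h>.
 */
#if 0
static void sample_cs_control(u32 control)
{
	/* Toggle a board-specific GPIO chip select here if required */
}

static struct pl022_config_chip sample_spi_chip_info = {
	.lbm = LOOPBACK_DISABLED,
	.com_mode = POLLING_TRANSFER,
	.iface = SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SSP_MASTER,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.endian_tx = SSP_TX_LSB,
	.endian_rx = SSP_RX_LSB,
	.data_size = SSP_DATA_BITS_8,
	.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
	.clk_phase = SSP_CLK_SECOND_EDGE,
	.clk_pol = SSP_CLK_POL_IDLE_LOW,
	/* clk_freq left zeroed so calculate_effective_freq() is used */
	.cs_control = sample_cs_control,
};

static struct spi_board_info sample_spi_devices[] __initdata = {
	{
		.modalias = "spidev",	/* assumed client driver */
		.controller_data = &sample_spi_chip_info,
		.max_speed_hz = 1000000,
		.bus_num = 0,		/* must match platform_info->bus_id */
		.chip_select = 0,
		.mode = SPI_MODE_1,
	},
};

/*
 * Registered from board init code with:
 *	spi_register_board_info(sample_spi_devices,
 *				ARRAY_SIZE(sample_spi_devices));
 */
#endif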
/**
 * pl022_cleanup - cleanup function registered to SPI master framework
 * @spi: spi device which is requesting cleanup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. It will free the runtime state of the chip.
 */
static void pl022_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	spi_set_ctldata(spi, NULL);
	kfree(chip);
}
static int __init
pl022_probe(struct amba_device *adev, struct amba_id *id)
{
	struct device *dev = &adev->dev;
	struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
	struct spi_master *master;
	struct pl022 *pl022 = NULL;	/* Data for this driver */
	int status = 0;

	dev_info(&adev->dev,
		 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
	if (platform_info == NULL) {
		dev_err(&adev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct pl022));
	if (master == NULL) {
		dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
		status = -ENOMEM;
		goto err_no_master;
	}

	pl022 = spi_master_get_devdata(master);
	pl022->master = master;
	pl022->master_info = platform_info;
	pl022->adev = adev;
	pl022->vendor = id->data;

	/*
	 * Bus number which has been assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = pl022_cleanup;
	master->setup = pl022_setup;
	master->transfer = pl022_transfer;
	dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);

	status = amba_request_regions(adev, NULL);
	if (status)
		goto err_no_ioregion;

	pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
	if (pl022->virtbase == NULL) {
		status = -ENOMEM;
		goto err_no_ioremap;
	}
	printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
	       adev->res.start, pl022->virtbase);

	pl022->clk = clk_get(&adev->dev, NULL);
	if (IS_ERR(pl022->clk)) {
		status = PTR_ERR(pl022->clk);
		dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
		goto err_no_clk;
	}
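	/*
	 * The register writes below need the interface clock running, hence
	 * the clk_enable()/clk_disable() bracket around disabling the SSP
	 * and loading its default configuration.
	 */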
	/* Disable SSP */
	clk_enable(pl022->clk);
	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
	       SSP_CR1(pl022->virtbase));
	load_ssp_default_config(pl022);
	clk_disable(pl022->clk);

	status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
			     pl022);
	if (status < 0) {
		dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irq;
	}

	/* Initialize and start queue */
	status = init_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev, "probe - problem initializing queue\n");
		goto err_init_queue;
	}
	status = start_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev, "probe - problem starting queue\n");
		goto err_start_queue;
	}

	/* Register with the SPI framework */
	amba_set_drvdata(adev, pl022);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&adev->dev,
			"probe - problem registering spi master\n");
		goto err_spi_register;
	}
	dev_dbg(dev, "probe succeeded\n");
	return 0;
 err_spi_register:
 err_start_queue:
 err_init_queue:
	destroy_queue(pl022);
	free_irq(adev->irq[0], pl022);
 err_no_irq:
	clk_put(pl022->clk);
 err_no_clk:
	iounmap(pl022->virtbase);
 err_no_ioremap:
	amba_release_regions(adev);
 err_no_ioregion:
	spi_master_put(master);
 err_no_master:
 err_no_pdata:
	return status;
}
static int __exit
pl022_remove(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;

	if (!pl022)
		return 0;

	/* Remove the queue */
	status = destroy_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev,
			"queue remove failed (%d)\n", status);
		return status;
	}
	load_ssp_default_config(pl022);
	free_irq(adev->irq[0], pl022);
	clk_disable(pl022->clk);
	clk_put(pl022->clk);
	iounmap(pl022->virtbase);
	amba_release_regions(adev);
	tasklet_disable(&pl022->pump_transfers);
	spi_unregister_master(pl022->master);
	spi_master_put(pl022->master);
	amba_set_drvdata(adev, NULL);
	dev_dbg(&adev->dev, "remove succeeded\n");
	return 0;
}
#ifdef CONFIG_PM
static int pl022_suspend(struct amba_device *adev, pm_message_t state)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;

	status = stop_queue(pl022);
	if (status) {
		dev_warn(&adev->dev, "suspend cannot stop queue\n");
		return status;
	}

	clk_enable(pl022->clk);
	load_ssp_default_config(pl022);
	clk_disable(pl022->clk);
	dev_dbg(&adev->dev, "suspended\n");
	return 0;
}

static int pl022_resume(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;

	/* Start the queue running */
	status = start_queue(pl022);
	if (status)
		dev_err(&adev->dev, "problem starting queue (%d)\n", status);
	else
		dev_dbg(&adev->dev, "resumed\n");

	return status;
}
#else
#define pl022_suspend	NULL
#define pl022_resume	NULL
#endif	/* CONFIG_PM */
static struct vendor_data vendor_arm = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
};

static struct vendor_data vendor_st = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
};

static struct amba_id pl022_ids[] = {
	{
		/*
		 * ARM PL022 variant, this has a 16bit wide
		 * and 8 locations deep TX/RX FIFO
		 */
		.id	= 0x00041022,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		/*
		 * ST Micro derivative, this has 32bit wide
		 * and 32 locations deep TX/RX FIFO
		 */
		.id	= 0x01080022,
		.mask	= 0xffffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },
};
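/*
 * Note on the table above: the AMBA bus core considers an entry a match
 * when (periphid & mask) == id. The ARM entry therefore only compares the
 * low 20 bits, so for example a part reporting periphid 0x00141022 (a
 * different revision field) still matches, since
 * 0x00141022 & 0x000fffff == 0x00041022, while the ST Micro entry requires
 * an exact 32-bit match.
 */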
static struct amba_driver pl022_driver = {
	.drv = {
		.name	= "ssp-pl022",
	},
	.id_table	= pl022_ids,
	.probe		= pl022_probe,
	.remove		= __exit_p(pl022_remove),
	.suspend	= pl022_suspend,
	.resume		= pl022_resume,
};

static int __init pl022_init(void)
{
	return amba_driver_register(&pl022_driver);
}

module_init(pl022_init);

static void __exit pl022_exit(void)
{
	amba_driver_unregister(&pl022_driver);
}

module_exit(pl022_exit);

MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("PL022 SSP Controller Driver");
MODULE_LICENSE("GPL");