amba-pl022.c

  1. /*
  2. * drivers/spi/amba-pl022.c
  3. *
  4. * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
  5. *
  6. * Copyright (C) 2008-2009 ST-Ericsson AB
  7. * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
  8. *
  9. * Author: Linus Walleij <linus.walleij@stericsson.com>
  10. *
  11. * Initial version inspired by:
  12. * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
  13. * Initial adoption to PL022 by:
  14. * Sachin Verma <sachin.verma@st.com>
  15. *
  16. * This program is free software; you can redistribute it and/or modify
  17. * it under the terms of the GNU General Public License as published by
  18. * the Free Software Foundation; either version 2 of the License, or
  19. * (at your option) any later version.
  20. *
  21. * This program is distributed in the hope that it will be useful,
  22. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  23. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  24. * GNU General Public License for more details.
  25. */
  26. /*
  27. * TODO:
  28. * - add timeout on polled transfers
  29. * - add generic DMA framework support
  30. */
  31. #include <linux/init.h>
  32. #include <linux/module.h>
  33. #include <linux/device.h>
  34. #include <linux/ioport.h>
  35. #include <linux/errno.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/spi/spi.h>
  38. #include <linux/workqueue.h>
  39. #include <linux/delay.h>
  40. #include <linux/clk.h>
  41. #include <linux/err.h>
  42. #include <linux/amba/bus.h>
  43. #include <linux/amba/pl022.h>
  44. #include <linux/io.h>
  45. /*
  46. * This macro is used to define some register default values.
  47. * reg is masked with mask, then OR:ed with an (again masked)
  48. * val shifted sb steps to the left.
  49. */
  50. #define SSP_WRITE_BITS(reg, val, mask, sb) \
  51. ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
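/*
 * Illustrative only (editor's note, not part of the original source):
 * with the macro above, a call such as
 *
 *	SSP_WRITE_BITS(chip->cr0, SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0);
 *
 * clears the DSS field of cr0 and then ORs in the 12-bit data size,
 * leaving every other bit of cr0 untouched. This is exactly how
 * pl022_setup() below builds up cr0/cr1/dmacr per chip.
 */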
  52. /*
  53. * This macro is also used to define some default values.
  54. * It will just shift val by sb steps to the left and mask
  55. * the result with mask.
  56. */
  57. #define GEN_MASK_BITS(val, mask, sb) \
  58. (((val)<<(sb)) & (mask))
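/*
 * Illustrative only: GEN_MASK_BITS() is the "build from scratch" variant,
 * used below to compose whole default register values, e.g.
 *
 *	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0)
 *
 * yields just the DSS field value with all other bits zero; OR-ing several
 * such terms together gives DEFAULT_SSP_REG_CR0 further down.
 */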
  59. #define DRIVE_TX 0
  60. #define DO_NOT_DRIVE_TX 1
  61. #define DO_NOT_QUEUE_DMA 0
  62. #define QUEUE_DMA 1
  63. #define RX_TRANSFER 1
  64. #define TX_TRANSFER 2
  65. /*
  66. * Macros to access SSP Registers with their offsets
  67. */
  68. #define SSP_CR0(r) (r + 0x000)
  69. #define SSP_CR1(r) (r + 0x004)
  70. #define SSP_DR(r) (r + 0x008)
  71. #define SSP_SR(r) (r + 0x00C)
  72. #define SSP_CPSR(r) (r + 0x010)
  73. #define SSP_IMSC(r) (r + 0x014)
  74. #define SSP_RIS(r) (r + 0x018)
  75. #define SSP_MIS(r) (r + 0x01C)
  76. #define SSP_ICR(r) (r + 0x020)
  77. #define SSP_DMACR(r) (r + 0x024)
  78. #define SSP_ITCR(r) (r + 0x080)
  79. #define SSP_ITIP(r) (r + 0x084)
  80. #define SSP_ITOP(r) (r + 0x088)
  81. #define SSP_TDR(r) (r + 0x08C)
  82. #define SSP_PID0(r) (r + 0xFE0)
  83. #define SSP_PID1(r) (r + 0xFE4)
  84. #define SSP_PID2(r) (r + 0xFE8)
  85. #define SSP_PID3(r) (r + 0xFEC)
  86. #define SSP_CID0(r) (r + 0xFF0)
  87. #define SSP_CID1(r) (r + 0xFF4)
  88. #define SSP_CID2(r) (r + 0xFF8)
  89. #define SSP_CID3(r) (r + 0xFFC)
  90. /*
  91. * SSP Control Register 0 - SSP_CR0
  92. */
  93. #define SSP_CR0_MASK_DSS (0x1FUL << 0)
  94. #define SSP_CR0_MASK_HALFDUP (0x1UL << 5)
  95. #define SSP_CR0_MASK_SPO (0x1UL << 6)
  96. #define SSP_CR0_MASK_SPH (0x1UL << 7)
  97. #define SSP_CR0_MASK_SCR (0xFFUL << 8)
  98. #define SSP_CR0_MASK_CSS (0x1FUL << 16)
  99. #define SSP_CR0_MASK_FRF (0x3UL << 21)
  100. /*
  101. * SSP Control Register 1 - SSP_CR1
  102. */
  103. #define SSP_CR1_MASK_LBM (0x1UL << 0)
  104. #define SSP_CR1_MASK_SSE (0x1UL << 1)
  105. #define SSP_CR1_MASK_MS (0x1UL << 2)
  106. #define SSP_CR1_MASK_SOD (0x1UL << 3)
  107. #define SSP_CR1_MASK_RENDN (0x1UL << 4)
  108. #define SSP_CR1_MASK_TENDN (0x1UL << 5)
  109. #define SSP_CR1_MASK_MWAIT (0x1UL << 6)
  110. #define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7)
  111. #define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10)
  112. /*
  113. * SSP Data Register - SSP_DR
  114. */
  115. #define SSP_DR_MASK_DATA 0xFFFFFFFF
  116. /*
  117. * SSP Status Register - SSP_SR
  118. */
  119. #define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
  120. #define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
  121. #define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
  122. #define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
  123. #define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
  124. /*
  125. * SSP Clock Prescale Register - SSP_CPSR
  126. */
  127. #define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
  128. /*
  129. * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
  130. */
  131. #define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
  132. #define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */
  133. #define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */
  134. #define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */
  135. /*
  136. * SSP Raw Interrupt Status Register - SSP_RIS
  137. */
  138. /* Receive Overrun Raw Interrupt status */
  139. #define SSP_RIS_MASK_RORRIS (0x1UL << 0)
  140. /* Receive Timeout Raw Interrupt status */
  141. #define SSP_RIS_MASK_RTRIS (0x1UL << 1)
  142. /* Receive FIFO Raw Interrupt status */
  143. #define SSP_RIS_MASK_RXRIS (0x1UL << 2)
  144. /* Transmit FIFO Raw Interrupt status */
  145. #define SSP_RIS_MASK_TXRIS (0x1UL << 3)
  146. /*
  147. * SSP Masked Interrupt Status Register - SSP_MIS
  148. */
  149. /* Receive Overrun Masked Interrupt status */
  150. #define SSP_MIS_MASK_RORMIS (0x1UL << 0)
  151. /* Receive Timeout Masked Interrupt status */
  152. #define SSP_MIS_MASK_RTMIS (0x1UL << 1)
  153. /* Receive FIFO Masked Interrupt status */
  154. #define SSP_MIS_MASK_RXMIS (0x1UL << 2)
  155. /* Transmit FIFO Masked Interrupt status */
  156. #define SSP_MIS_MASK_TXMIS (0x1UL << 3)
  157. /*
  158. * SSP Interrupt Clear Register - SSP_ICR
  159. */
  160. /* Receive Overrun Raw Clear Interrupt bit */
  161. #define SSP_ICR_MASK_RORIC (0x1UL << 0)
  162. /* Receive Timeout Clear Interrupt bit */
  163. #define SSP_ICR_MASK_RTIC (0x1UL << 1)
  164. /*
  165. * SSP DMA Control Register - SSP_DMACR
  166. */
  167. /* Receive DMA Enable bit */
  168. #define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
  169. /* Transmit DMA Enable bit */
  170. #define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
  171. /*
  172. * SSP Integration Test control Register - SSP_ITCR
  173. */
  174. #define SSP_ITCR_MASK_ITEN (0x1UL << 0)
  175. #define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
  176. /*
  177. * SSP Integration Test Input Register - SSP_ITIP
  178. */
  179. #define ITIP_MASK_SSPRXD (0x1UL << 0)
  180. #define ITIP_MASK_SSPFSSIN (0x1UL << 1)
  181. #define ITIP_MASK_SSPCLKIN (0x1UL << 2)
  182. #define ITIP_MASK_RXDMAC (0x1UL << 3)
  183. #define ITIP_MASK_TXDMAC (0x1UL << 4)
  184. #define ITIP_MASK_SSPTXDIN (0x1UL << 5)
  185. /*
  186. * SSP Integration Test output Register - SSP_ITOP
  187. */
  188. #define ITOP_MASK_SSPTXD (0x1UL << 0)
  189. #define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
  190. #define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
  191. #define ITOP_MASK_SSPOEn (0x1UL << 3)
  192. #define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
  193. #define ITOP_MASK_RORINTR (0x1UL << 5)
  194. #define ITOP_MASK_RTINTR (0x1UL << 6)
  195. #define ITOP_MASK_RXINTR (0x1UL << 7)
  196. #define ITOP_MASK_TXINTR (0x1UL << 8)
  197. #define ITOP_MASK_INTR (0x1UL << 9)
  198. #define ITOP_MASK_RXDMABREQ (0x1UL << 10)
  199. #define ITOP_MASK_RXDMASREQ (0x1UL << 11)
  200. #define ITOP_MASK_TXDMABREQ (0x1UL << 12)
  201. #define ITOP_MASK_TXDMASREQ (0x1UL << 13)
  202. /*
  203. * SSP Test Data Register - SSP_TDR
  204. */
  205. #define TDR_MASK_TESTDATA (0xFFFFFFFF)
  206. /*
  207. * Message State
  208. * we use the spi_message.state (void *) pointer to
  209. * hold a single state value, that's why all this
  210. * (void *) casting is done here.
  211. */
  212. #define STATE_START ((void *) 0)
  213. #define STATE_RUNNING ((void *) 1)
  214. #define STATE_DONE ((void *) 2)
  215. #define STATE_ERROR ((void *) -1)
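/*
 * Illustrative only: because spi_message.state is a void *, the driver
 * stores and compares these values directly, e.g.
 *
 *	msg->state = STATE_RUNNING;
 *	...
 *	if (msg->state == STATE_ERROR)
 *		...
 *
 * as done in the interrupt handler and in pump_transfers() below.
 */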
  216. /*
  217. * Queue State
  218. */
  219. #define QUEUE_RUNNING (0)
  220. #define QUEUE_STOPPED (1)
  221. /*
  222. * SSP State - Whether Enabled or Disabled
  223. */
  224. #define SSP_DISABLED (0)
  225. #define SSP_ENABLED (1)
  226. /*
  227. * SSP DMA State - Whether DMA Enabled or Disabled
  228. */
  229. #define SSP_DMA_DISABLED (0)
  230. #define SSP_DMA_ENABLED (1)
  231. /*
  232. * SSP Clock Defaults
  233. */
  234. #define NMDK_SSP_DEFAULT_CLKRATE 0x2
  235. #define NMDK_SSP_DEFAULT_PRESCALE 0x40
  236. /*
  237. * SSP Clock Parameter ranges
  238. */
  239. #define CPSDVR_MIN 0x02
  240. #define CPSDVR_MAX 0xFE
  241. #define SCR_MIN 0x00
  242. #define SCR_MAX 0xFF
  243. /*
  244. * SSP Interrupt related Macros
  245. */
  246. #define DEFAULT_SSP_REG_IMSC 0x0UL
  247. #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
  248. #define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)
  249. #define CLEAR_ALL_INTERRUPTS 0x3
  250. /*
  251. * The type of reading going on for this chip
  252. */
  253. enum ssp_reading {
  254. READING_NULL,
  255. READING_U8,
  256. READING_U16,
  257. READING_U32
  258. };
  259. /**
  260. * The type of writing going on for this chip
  261. */
  262. enum ssp_writing {
  263. WRITING_NULL,
  264. WRITING_U8,
  265. WRITING_U16,
  266. WRITING_U32
  267. };
  268. /**
  269. * struct vendor_data - vendor-specific config parameters
  270. * for PL022 derivatives
  271. * @fifodepth: depth of FIFOs (both)
  272. * @max_bpw: maximum number of bits per word
  273. * @unidir: supports unidirectional transfers
  274. */
  275. struct vendor_data {
  276. int fifodepth;
  277. int max_bpw;
  278. bool unidir;
  279. };
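/*
 * A minimal sketch (editor's illustration, not taken from this excerpt) of
 * what a vendor_data instance for the standard ARM PL022 could look like,
 * assuming the 8-entry FIFO and 16-bit word limit mentioned in the
 * comments further down; the concrete values are assumptions:
 *
 *	static struct vendor_data vendor_arm = {
 *		.fifodepth = 8,
 *		.max_bpw = 16,
 *		.unidir = false,
 *	};
 */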
  280. /**
  281. * struct pl022 - This is the private SSP driver data structure
  282. * @adev: AMBA device model hookup
  283. * @phybase: The physical memory where the SSP device resides
  284. * @virtbase: The virtual memory where the SSP is mapped
  285. * @master: SPI framework hookup
  286. * @master_info: controller-specific data from machine setup
  287. * @clk: SSP block clock, enabled while a message is being processed
  288. * @pump_messages: Work struct for scheduling work to the workqueue
  289. * @queue_lock: spinlock to synchronise access to driver data
  290. * @workqueue: a workqueue on which any spi_message request is queued
  291. * @busy: workqueue is busy
  292. * @run: workqueue is running
  293. * @pump_transfers: Tasklet used in Interrupt Transfer mode
  294. * @cur_msg: Pointer to current spi_message being processed
  295. * @cur_transfer: Pointer to current spi_transfer
  296. * @cur_chip: pointer to the current client's chip (assigned from controller_state)
  297. * @tx: current position in TX buffer to be read
  298. * @tx_end: end position in TX buffer to be read
  299. * @rx: current position in RX buffer to be written
  300. * @rx_end: end position in RX buffer to be written
  301. * @read: the type of read currently going on
  302. * @write: the type of write currently going on
  303. */
  304. struct pl022 {
  305. struct amba_device *adev;
  306. struct vendor_data *vendor;
  307. resource_size_t phybase;
  308. void __iomem *virtbase;
  309. struct clk *clk;
  310. struct spi_master *master;
  311. struct pl022_ssp_controller *master_info;
  312. /* Driver message queue */
  313. struct workqueue_struct *workqueue;
  314. struct work_struct pump_messages;
  315. spinlock_t queue_lock;
  316. struct list_head queue;
  317. int busy;
  318. int run;
  319. /* Message transfer pump */
  320. struct tasklet_struct pump_transfers;
  321. struct spi_message *cur_msg;
  322. struct spi_transfer *cur_transfer;
  323. struct chip_data *cur_chip;
  324. void *tx;
  325. void *tx_end;
  326. void *rx;
  327. void *rx_end;
  328. enum ssp_reading read;
  329. enum ssp_writing write;
  330. };
  331. /**
  332. * struct chip_data - To maintain runtime state of SSP for each client chip
  333. * @cr0: Value of control register CR0 of SSP
  334. * @cr1: Value of control register CR1 of SSP
  335. * @dmacr: Value of DMA control Register of SSP
  336. * @cpsr: Value of Clock prescale register
  337. * @n_bytes: how many bytes (a power of 2) are required for the client's data width
  338. * @enable_dma: Whether to enable DMA or not
  339. * @write: function ptr to be used to write when doing xfer for this chip
  340. * @read: function ptr to be used to read when doing xfer for this chip
  341. * @cs_control: chip select callback provided by chip
  342. * @xfer_type: polling/interrupt/DMA
  343. *
  344. * Runtime state of the SSP controller, maintained per chip.
  345. * This is set according to the message currently being served.
  346. */
  347. struct chip_data {
  348. u16 cr0;
  349. u16 cr1;
  350. u16 dmacr;
  351. u16 cpsr;
  352. u8 n_bytes;
  353. u8 enable_dma:1;
  354. enum ssp_reading read;
  355. enum ssp_writing write;
  356. void (*cs_control) (u32 command);
  357. int xfer_type;
  358. };
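/*
 * Illustrative only: n_bytes, read and write are derived from the client's
 * data_size in pl022_setup() below, e.g.
 *
 *	data_size <= 8   ->  n_bytes = 1, READING_U8  / WRITING_U8
 *	data_size <= 16  ->  n_bytes = 2, READING_U16 / WRITING_U16
 *	data_size <= 32  ->  n_bytes = 4, READING_U32 / WRITING_U32 (if the
 *	                     vendor variant supports more than 16 bits)
 */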
  359. /**
  360. * null_cs_control - Dummy chip select function
  361. * @command: select/deselect the chip
  362. *
  363. * If no chip select function is provided by the client, this is used as a
  364. * dummy chip select
  365. */
  366. static void null_cs_control(u32 command)
  367. {
  368. pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
  369. }
  370. /**
  371. * giveback - current spi_message is over, schedule next message and call
  372. * callback of this message. Assumes that caller already
  373. * set message->status; dma and pio irqs are blocked
  374. * @pl022: SSP driver private data structure
  375. */
  376. static void giveback(struct pl022 *pl022)
  377. {
  378. struct spi_transfer *last_transfer;
  379. unsigned long flags;
  380. struct spi_message *msg;
  381. void (*curr_cs_control) (u32 command);
  382. /*
  383. * This local reference to the chip select function
  384. * is needed because we set curr_chip to NULL
  385. * as a step toward terminating the message.
  386. */
  387. curr_cs_control = pl022->cur_chip->cs_control;
  388. spin_lock_irqsave(&pl022->queue_lock, flags);
  389. msg = pl022->cur_msg;
  390. pl022->cur_msg = NULL;
  391. pl022->cur_transfer = NULL;
  392. pl022->cur_chip = NULL;
  393. queue_work(pl022->workqueue, &pl022->pump_messages);
  394. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  395. last_transfer = list_entry(msg->transfers.prev,
  396. struct spi_transfer,
  397. transfer_list);
  398. /* Delay if requested before any change in chip select */
  399. if (last_transfer->delay_usecs)
  400. /*
  401. * FIXME: This runs in interrupt context.
  402. * Is this really smart?
  403. */
  404. udelay(last_transfer->delay_usecs);
  405. /*
  406. * Drop chip select UNLESS cs_change is true or we are returning
  407. * a message with an error, or next message is for another chip
  408. */
  409. if (!last_transfer->cs_change)
  410. curr_cs_control(SSP_CHIP_DESELECT);
  411. else {
  412. struct spi_message *next_msg;
  413. /* Holding of cs was hinted, but we need to make sure
  414. * the next message is for the same chip. Don't waste
  415. * time with the following tests unless this was hinted.
  416. *
  417. * We cannot postpone this until pump_messages, because
  418. * after calling msg->complete (below) the driver that
  419. * sent the current message could be unloaded, which
  420. * could invalidate the cs_control() callback...
  421. */
  422. /* get a pointer to the next message, if any */
  423. spin_lock_irqsave(&pl022->queue_lock, flags);
  424. if (list_empty(&pl022->queue))
  425. next_msg = NULL;
  426. else
  427. next_msg = list_entry(pl022->queue.next,
  428. struct spi_message, queue);
  429. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  430. /* see if the next and current messages point
  431. * to the same chip
  432. */
  433. if (next_msg && next_msg->spi != msg->spi)
  434. next_msg = NULL;
  435. if (!next_msg || msg->state == STATE_ERROR)
  436. curr_cs_control(SSP_CHIP_DESELECT);
  437. }
  438. msg->state = NULL;
  439. if (msg->complete)
  440. msg->complete(msg->context);
  441. /* This message is completed, so let's turn off the clock! */
  442. clk_disable(pl022->clk);
  443. }
  444. /**
  445. * flush - flush the FIFO to reach a clean state
  446. * @pl022: SSP driver private data structure
  447. */
  448. static int flush(struct pl022 *pl022)
  449. {
  450. unsigned long limit = loops_per_jiffy << 1;
  451. dev_dbg(&pl022->adev->dev, "flush\n");
  452. do {
  453. while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
  454. readw(SSP_DR(pl022->virtbase));
  455. } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
  456. return limit;
  457. }
  458. /**
  459. * restore_state - Load configuration of current chip
  460. * @pl022: SSP driver private data structure
  461. */
  462. static void restore_state(struct pl022 *pl022)
  463. {
  464. struct chip_data *chip = pl022->cur_chip;
  465. writew(chip->cr0, SSP_CR0(pl022->virtbase));
  466. writew(chip->cr1, SSP_CR1(pl022->virtbase));
  467. writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
  468. writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
  469. writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  470. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  471. }
  472. /**
  473. * load_ssp_default_config - Load default configuration for SSP
  474. * @pl022: SSP driver private data structure
  475. */
  476. /*
  477. * Default SSP Register Values
  478. */
  479. #define DEFAULT_SSP_REG_CR0 ( \
  480. GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
  481. GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
  482. GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
  483. GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
  484. GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
  485. GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \
  486. GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
  487. )
  488. #define DEFAULT_SSP_REG_CR1 ( \
  489. GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
  490. GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
  491. GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
  492. GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
  493. GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \
  494. GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \
  495. GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\
  496. GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \
  497. GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \
  498. )
  499. #define DEFAULT_SSP_REG_CPSR ( \
  500. GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
  501. )
  502. #define DEFAULT_SSP_REG_DMACR (\
  503. GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
  504. GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
  505. )
  506. static void load_ssp_default_config(struct pl022 *pl022)
  507. {
  508. writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
  509. writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
  510. writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
  511. writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
  512. writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  513. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  514. }
  515. /**
  516. * This will write to TX and read from RX according to the parameters
  517. * set in pl022.
  518. */
  519. static void readwriter(struct pl022 *pl022)
  520. {
  521. /*
  522. * The FIFO depth is different between primecell variants.
  523. * I believe filling in too much in the FIFO might cause
  524. * errors in 8bit wide transfers on ARM variants (just 8 words
  525. * of FIFO, meaning only 8x8 = 64 bits in the FIFO) at least.
  526. *
  527. * FIXME: currently we have no logic to account for this.
  528. * perhaps there is even something broken in HW regarding
  529. * 8bit transfers (it doesn't fail on 16bit) so this needs
  530. * more investigation...
  531. */
  532. dev_dbg(&pl022->adev->dev,
  533. "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
  534. __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
  535. /* Read as much as you can */
  536. while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
  537. && (pl022->rx < pl022->rx_end)) {
  538. switch (pl022->read) {
  539. case READING_NULL:
  540. readw(SSP_DR(pl022->virtbase));
  541. break;
  542. case READING_U8:
  543. *(u8 *) (pl022->rx) =
  544. readw(SSP_DR(pl022->virtbase)) & 0xFFU;
  545. break;
  546. case READING_U16:
  547. *(u16 *) (pl022->rx) =
  548. (u16) readw(SSP_DR(pl022->virtbase));
  549. break;
  550. case READING_U32:
  551. *(u32 *) (pl022->rx) =
  552. readl(SSP_DR(pl022->virtbase));
  553. break;
  554. }
  555. pl022->rx += (pl022->cur_chip->n_bytes);
  556. }
  557. /*
  558. * Write as much as you can, while keeping an eye on the RX FIFO!
  559. */
  560. while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
  561. && (pl022->tx < pl022->tx_end)) {
  562. switch (pl022->write) {
  563. case WRITING_NULL:
  564. writew(0x0, SSP_DR(pl022->virtbase));
  565. break;
  566. case WRITING_U8:
  567. writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
  568. break;
  569. case WRITING_U16:
  570. writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
  571. break;
  572. case WRITING_U32:
  573. writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
  574. break;
  575. }
  576. pl022->tx += (pl022->cur_chip->n_bytes);
  577. /*
  578. * This inner reader takes care of things appearing in the RX
  579. * FIFO as we're transmitting. This will happen a lot since the
  580. * clock starts running when you put things into the TX FIFO,
  581. * and then things are continuously clocked into the RX FIFO.
  582. */
  583. while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
  584. && (pl022->rx < pl022->rx_end)) {
  585. switch (pl022->read) {
  586. case READING_NULL:
  587. readw(SSP_DR(pl022->virtbase));
  588. break;
  589. case READING_U8:
  590. *(u8 *) (pl022->rx) =
  591. readw(SSP_DR(pl022->virtbase)) & 0xFFU;
  592. break;
  593. case READING_U16:
  594. *(u16 *) (pl022->rx) =
  595. (u16) readw(SSP_DR(pl022->virtbase));
  596. break;
  597. case READING_U32:
  598. *(u32 *) (pl022->rx) =
  599. readl(SSP_DR(pl022->virtbase));
  600. break;
  601. }
  602. pl022->rx += (pl022->cur_chip->n_bytes);
  603. }
  604. }
  605. /*
  606. * When we exit here the TX FIFO should be full and the RX FIFO
  607. * should be empty
  608. */
  609. }
  610. /**
  611. * next_transfer - Move to the Next transfer in the current spi message
  612. * @pl022: SSP driver private data structure
  613. *
  614. * This function moves through the linked list of spi transfers in the
  615. * current spi message and returns the state of the current spi
  616. * message, i.e. whether its last transfer is done (STATE_DONE) or the
  617. * next transfer is ready (STATE_RUNNING)
  618. */
  619. static void *next_transfer(struct pl022 *pl022)
  620. {
  621. struct spi_message *msg = pl022->cur_msg;
  622. struct spi_transfer *trans = pl022->cur_transfer;
  623. /* Move to next transfer */
  624. if (trans->transfer_list.next != &msg->transfers) {
  625. pl022->cur_transfer =
  626. list_entry(trans->transfer_list.next,
  627. struct spi_transfer, transfer_list);
  628. return STATE_RUNNING;
  629. }
  630. return STATE_DONE;
  631. }
  632. /**
  633. * pl022_interrupt_handler - Interrupt handler for SSP controller
  634. *
  635. * This function handles interrupts generated for an interrupt based transfer.
  636. * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the
  637. * current message's state as STATE_ERROR and schedule the tasklet
  638. * pump_transfers which will do the postprocessing of the current message by
  639. * calling giveback(). Otherwise it reads data from the RX FIFO till there is no
  640. * more data, and writes data to the TX FIFO as long as it is not full. If we complete
  641. * the transfer we move to the next transfer and schedule the tasklet.
  642. */
  643. static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
  644. {
  645. struct pl022 *pl022 = dev_id;
  646. struct spi_message *msg = pl022->cur_msg;
  647. u16 irq_status = 0;
  648. u16 flag = 0;
  649. if (unlikely(!msg)) {
  650. dev_err(&pl022->adev->dev,
  651. "bad message state in interrupt handler");
  652. /* Never fail */
  653. return IRQ_HANDLED;
  654. }
  655. /* Read the Interrupt Status Register */
  656. irq_status = readw(SSP_MIS(pl022->virtbase));
  657. if (unlikely(!irq_status))
  658. return IRQ_NONE;
  659. /* This handles the error code interrupts */
  660. if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
  661. /*
  662. * Overrun interrupt - bail out since our Data has been
  663. * corrupted
  664. */
  665. dev_err(&pl022->adev->dev,
  666. "FIFO overrun\n");
  667. if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
  668. dev_err(&pl022->adev->dev,
  669. "RXFIFO is full\n");
  670. if (!(readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF))
  671. dev_err(&pl022->adev->dev,
  672. "TXFIFO is full\n");
  673. /*
  674. * Disable and clear interrupts, disable SSP,
  675. * mark message with bad status so it can be
  676. * retried.
  677. */
  678. writew(DISABLE_ALL_INTERRUPTS,
  679. SSP_IMSC(pl022->virtbase));
  680. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  681. writew((readw(SSP_CR1(pl022->virtbase)) &
  682. (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
  683. msg->state = STATE_ERROR;
  684. /* Schedule message queue handler */
  685. tasklet_schedule(&pl022->pump_transfers);
  686. return IRQ_HANDLED;
  687. }
  688. readwriter(pl022);
  689. if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
  690. flag = 1;
  691. /* Disable Transmit interrupt */
  692. writew(readw(SSP_IMSC(pl022->virtbase)) &
  693. (~SSP_IMSC_MASK_TXIM),
  694. SSP_IMSC(pl022->virtbase));
  695. }
  696. /*
  697. * Since all transactions must write as much as shall be read,
  698. * we can conclude the entire transaction once RX is complete.
  699. * At this point, all TX will always be finished.
  700. */
  701. if (pl022->rx >= pl022->rx_end) {
  702. writew(DISABLE_ALL_INTERRUPTS,
  703. SSP_IMSC(pl022->virtbase));
  704. writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
  705. if (unlikely(pl022->rx > pl022->rx_end)) {
  706. dev_warn(&pl022->adev->dev, "read %u surplus "
  707. "bytes (did you request an odd "
  708. "number of bytes on a 16bit bus?)\n",
  709. (u32) (pl022->rx - pl022->rx_end));
  710. }
  711. /* Update total bytes transferred */
  712. msg->actual_length += pl022->cur_transfer->len;
  713. if (pl022->cur_transfer->cs_change)
  714. pl022->cur_chip->
  715. cs_control(SSP_CHIP_DESELECT);
  716. /* Move to next transfer */
  717. msg->state = next_transfer(pl022);
  718. tasklet_schedule(&pl022->pump_transfers);
  719. return IRQ_HANDLED;
  720. }
  721. return IRQ_HANDLED;
  722. }
  723. /**
  724. * This sets up the pointers to memory for the next transfer to
  725. * send out on the SPI bus.
  726. */
  727. static int set_up_next_transfer(struct pl022 *pl022,
  728. struct spi_transfer *transfer)
  729. {
  730. int residue;
  731. /* Sanity check the message for this bus width */
  732. residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
  733. if (unlikely(residue != 0)) {
  734. dev_err(&pl022->adev->dev,
  735. "message of %u bytes to transmit but the current "
  736. "chip bus has a data width of %u bytes!\n",
  737. pl022->cur_transfer->len,
  738. pl022->cur_chip->n_bytes);
  739. dev_err(&pl022->adev->dev, "skipping this message\n");
  740. return -EIO;
  741. }
  742. pl022->tx = (void *)transfer->tx_buf;
  743. pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
  744. pl022->rx = (void *)transfer->rx_buf;
  745. pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
  746. pl022->write =
  747. pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
  748. pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
  749. return 0;
  750. }
  751. /**
  752. * pump_transfers - Tasklet function which schedules next interrupt transfer
  753. * when running in interrupt transfer mode.
  754. * @data: SSP driver private data structure
  755. *
  756. */
  757. static void pump_transfers(unsigned long data)
  758. {
  759. struct pl022 *pl022 = (struct pl022 *) data;
  760. struct spi_message *message = NULL;
  761. struct spi_transfer *transfer = NULL;
  762. struct spi_transfer *previous = NULL;
  763. /* Get current state information */
  764. message = pl022->cur_msg;
  765. transfer = pl022->cur_transfer;
  766. /* Handle for abort */
  767. if (message->state == STATE_ERROR) {
  768. message->status = -EIO;
  769. giveback(pl022);
  770. return;
  771. }
  772. /* Handle end of message */
  773. if (message->state == STATE_DONE) {
  774. message->status = 0;
  775. giveback(pl022);
  776. return;
  777. }
  778. /* Delay if requested at end of transfer before CS change */
  779. if (message->state == STATE_RUNNING) {
  780. previous = list_entry(transfer->transfer_list.prev,
  781. struct spi_transfer,
  782. transfer_list);
  783. if (previous->delay_usecs)
  784. /*
  785. * FIXME: This runs in interrupt context.
  786. * Is this really smart?
  787. */
  788. udelay(previous->delay_usecs);
  789. /* Drop chip select only if cs_change is requested */
  790. if (previous->cs_change)
  791. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  792. } else {
  793. /* STATE_START */
  794. message->state = STATE_RUNNING;
  795. }
  796. if (set_up_next_transfer(pl022, transfer)) {
  797. message->state = STATE_ERROR;
  798. message->status = -EIO;
  799. giveback(pl022);
  800. return;
  801. }
  802. /* Flush the FIFOs and let's go! */
  803. flush(pl022);
  804. writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  805. }
  806. /**
  807. * NOT IMPLEMENTED
  808. * configure_dma - configures the DMA pipes for DMA transfers
  809. * @data: SSP driver's private data structure
  810. *
  811. */
  812. static int configure_dma(void *data)
  813. {
  814. struct pl022 *pl022 = data;
  815. dev_dbg(&pl022->adev->dev, "configure DMA\n");
  816. return -ENOTSUPP;
  817. }
  818. /**
  819. * do_dma_transfer - handles the transfers of the current message
  820. * if it is a DMA xfer.
  821. * NOT FULLY IMPLEMENTED
  822. * @data: SSP driver's private data structure
  823. */
  824. static void do_dma_transfer(void *data)
  825. {
  826. struct pl022 *pl022 = data;
  827. if (configure_dma(data)) {
  828. dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n");
  829. goto err_config_dma;
  830. }
  831. /* TODO: Implement DMA setup of pipes here */
  832. /* Enable target chip, set up transfer */
  833. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  834. if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
  835. /* Error path */
  836. pl022->cur_msg->state = STATE_ERROR;
  837. pl022->cur_msg->status = -EIO;
  838. giveback(pl022);
  839. return;
  840. }
  841. /* Enable SSP */
  842. writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
  843. SSP_CR1(pl022->virtbase));
  844. /* TODO: Enable the DMA transfer here */
  845. return;
  846. err_config_dma:
  847. pl022->cur_msg->state = STATE_ERROR;
  848. pl022->cur_msg->status = -EIO;
  849. giveback(pl022);
  850. return;
  851. }
  852. static void do_interrupt_transfer(void *data)
  853. {
  854. struct pl022 *pl022 = data;
  855. /* Enable target chip */
  856. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  857. if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
  858. /* Error path */
  859. pl022->cur_msg->state = STATE_ERROR;
  860. pl022->cur_msg->status = -EIO;
  861. giveback(pl022);
  862. return;
  863. }
  864. /* Enable SSP, turn on interrupts */
  865. writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
  866. SSP_CR1(pl022->virtbase));
  867. writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
  868. }
  869. static void do_polling_transfer(void *data)
  870. {
  871. struct pl022 *pl022 = data;
  872. struct spi_message *message = NULL;
  873. struct spi_transfer *transfer = NULL;
  874. struct spi_transfer *previous = NULL;
  875. struct chip_data *chip;
  876. chip = pl022->cur_chip;
  877. message = pl022->cur_msg;
  878. while (message->state != STATE_DONE) {
  879. /* Handle for abort */
  880. if (message->state == STATE_ERROR)
  881. break;
  882. transfer = pl022->cur_transfer;
  883. /* Delay if requested at end of transfer */
  884. if (message->state == STATE_RUNNING) {
  885. previous =
  886. list_entry(transfer->transfer_list.prev,
  887. struct spi_transfer, transfer_list);
  888. if (previous->delay_usecs)
  889. udelay(previous->delay_usecs);
  890. if (previous->cs_change)
  891. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  892. } else {
  893. /* STATE_START */
  894. message->state = STATE_RUNNING;
  895. pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
  896. }
  897. /* Configuration Changing Per Transfer */
  898. if (set_up_next_transfer(pl022, transfer)) {
  899. /* Error path */
  900. message->state = STATE_ERROR;
  901. break;
  902. }
  903. /* Flush FIFOs and enable SSP */
  904. flush(pl022);
  905. writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
  906. SSP_CR1(pl022->virtbase));
  907. dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... \n");
  908. /* FIXME: insert a timeout so we don't hang here indefinitely */
  909. while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
  910. readwriter(pl022);
  911. /* Update total bytes transferred */
  912. message->actual_length += pl022->cur_transfer->len;
  913. if (pl022->cur_transfer->cs_change)
  914. pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
  915. /* Move to next transfer */
  916. message->state = next_transfer(pl022);
  917. }
  918. /* Handle end of message */
  919. if (message->state == STATE_DONE)
  920. message->status = 0;
  921. else
  922. message->status = -EIO;
  923. giveback(pl022);
  924. return;
  925. }
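/*
 * A minimal sketch, not wired into the driver, of how the FIXME in
 * do_polling_transfer() above ("insert a timeout") could be addressed:
 * bound the busy-wait the same way flush() bounds its loop. The limit
 * value and the error handling on expiry are editor assumptions, not the
 * author's implementation:
 *
 *	unsigned long limit = loops_per_jiffy << 1;
 *
 *	while ((pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
 *	       && limit--)
 *		readwriter(pl022);
 *	if (!limit)
 *		message->state = STATE_ERROR;
 */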
  926. /**
  927. * pump_messages - Workqueue function which processes spi message queue
  928. * @data: pointer to private data of SSP driver
  929. *
  930. * This function checks if there is any spi message in the queue that
  931. * needs processing and delegates control to the appropriate function
  932. * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer()
  933. * based on the kind of transfer
  934. *
  935. */
  936. static void pump_messages(struct work_struct *work)
  937. {
  938. struct pl022 *pl022 =
  939. container_of(work, struct pl022, pump_messages);
  940. unsigned long flags;
  941. /* Lock queue and check for queue work */
  942. spin_lock_irqsave(&pl022->queue_lock, flags);
  943. if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
  944. pl022->busy = 0;
  945. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  946. return;
  947. }
  948. /* Make sure we are not already running a message */
  949. if (pl022->cur_msg) {
  950. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  951. return;
  952. }
  953. /* Extract head of queue */
  954. pl022->cur_msg =
  955. list_entry(pl022->queue.next, struct spi_message, queue);
  956. list_del_init(&pl022->cur_msg->queue);
  957. pl022->busy = 1;
  958. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  959. /* Initial message state */
  960. pl022->cur_msg->state = STATE_START;
  961. pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
  962. struct spi_transfer,
  963. transfer_list);
  964. /* Setup the SPI using the per chip configuration */
  965. pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
  966. /*
  967. * We enable the clock here, then the clock will be disabled when
  968. * giveback() is called in each method (poll/interrupt/DMA)
  969. */
  970. clk_enable(pl022->clk);
  971. restore_state(pl022);
  972. flush(pl022);
  973. if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
  974. do_polling_transfer(pl022);
  975. else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER)
  976. do_interrupt_transfer(pl022);
  977. else
  978. do_dma_transfer(pl022);
  979. }
  980. static int __init init_queue(struct pl022 *pl022)
  981. {
  982. INIT_LIST_HEAD(&pl022->queue);
  983. spin_lock_init(&pl022->queue_lock);
  984. pl022->run = QUEUE_STOPPED;
  985. pl022->busy = 0;
  986. tasklet_init(&pl022->pump_transfers,
  987. pump_transfers, (unsigned long)pl022);
  988. INIT_WORK(&pl022->pump_messages, pump_messages);
  989. pl022->workqueue = create_singlethread_workqueue(
  990. dev_name(pl022->master->dev.parent));
  991. if (pl022->workqueue == NULL)
  992. return -EBUSY;
  993. return 0;
  994. }
  995. static int start_queue(struct pl022 *pl022)
  996. {
  997. unsigned long flags;
  998. spin_lock_irqsave(&pl022->queue_lock, flags);
  999. if (pl022->run == QUEUE_RUNNING || pl022->busy) {
  1000. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1001. return -EBUSY;
  1002. }
  1003. pl022->run = QUEUE_RUNNING;
  1004. pl022->cur_msg = NULL;
  1005. pl022->cur_transfer = NULL;
  1006. pl022->cur_chip = NULL;
  1007. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1008. queue_work(pl022->workqueue, &pl022->pump_messages);
  1009. return 0;
  1010. }
  1011. static int stop_queue(struct pl022 *pl022)
  1012. {
  1013. unsigned long flags;
  1014. unsigned limit = 500;
  1015. int status = 0;
  1016. spin_lock_irqsave(&pl022->queue_lock, flags);
  1017. /* This is a bit lame, but is optimized for the common execution path.
  1018. * A wait_queue on the pl022->busy could be used, but then the common
  1019. * execution path (pump_messages) would be required to call wake_up or
  1020. * friends on every SPI message. Do this instead */
  1021. pl022->run = QUEUE_STOPPED;
  1022. while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
  1023. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1024. msleep(10);
  1025. spin_lock_irqsave(&pl022->queue_lock, flags);
  1026. }
  1027. if (!list_empty(&pl022->queue) || pl022->busy)
  1028. status = -EBUSY;
  1029. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1030. return status;
  1031. }
  1032. static int destroy_queue(struct pl022 *pl022)
  1033. {
  1034. int status;
  1035. status = stop_queue(pl022);
  1036. /* we are unloading the module or failing to load (only two calls
  1037. * to this routine), and neither call can handle a return value.
  1038. * However, destroy_workqueue calls flush_workqueue, and that will
  1039. * block until all work is done. If the reason that stop_queue
  1040. * timed out is that the work will never finish, then it does no
  1041. * good to call destroy_workqueue, so return anyway. */
  1042. if (status != 0)
  1043. return status;
  1044. destroy_workqueue(pl022->workqueue);
  1045. return 0;
  1046. }
  1047. static int verify_controller_parameters(struct pl022 *pl022,
  1048. struct pl022_config_chip *chip_info)
  1049. {
  1050. if ((chip_info->lbm != LOOPBACK_ENABLED)
  1051. && (chip_info->lbm != LOOPBACK_DISABLED)) {
  1052. dev_err(chip_info->dev,
  1053. "loopback Mode is configured incorrectly\n");
  1054. return -EINVAL;
  1055. }
  1056. if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
  1057. || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
  1058. dev_err(chip_info->dev,
  1059. "interface is configured incorrectly\n");
  1060. return -EINVAL;
  1061. }
  1062. if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
  1063. (!pl022->vendor->unidir)) {
  1064. dev_err(chip_info->dev,
  1065. "unidirectional mode not supported in this "
  1066. "hardware version\n");
  1067. return -EINVAL;
  1068. }
  1069. if ((chip_info->hierarchy != SSP_MASTER)
  1070. && (chip_info->hierarchy != SSP_SLAVE)) {
  1071. dev_err(chip_info->dev,
  1072. "hierarchy is configured incorrectly\n");
  1073. return -EINVAL;
  1074. }
  1075. if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN)
  1076. || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) {
  1077. dev_err(chip_info->dev,
  1078. "cpsdvsr is configured incorrectly\n");
  1079. return -EINVAL;
  1080. }
  1081. if ((chip_info->endian_rx != SSP_RX_MSB)
  1082. && (chip_info->endian_rx != SSP_RX_LSB)) {
  1083. dev_err(chip_info->dev,
  1084. "RX FIFO endianess is configured incorrectly\n");
  1085. return -EINVAL;
  1086. }
  1087. if ((chip_info->endian_tx != SSP_TX_MSB)
  1088. && (chip_info->endian_tx != SSP_TX_LSB)) {
  1089. dev_err(chip_info->dev,
  1090. "TX FIFO endianess is configured incorrectly\n");
  1091. return -EINVAL;
  1092. }
  1093. if ((chip_info->data_size < SSP_DATA_BITS_4)
  1094. || (chip_info->data_size > SSP_DATA_BITS_32)) {
  1095. dev_err(chip_info->dev,
  1096. "DATA Size is configured incorrectly\n");
  1097. return -EINVAL;
  1098. }
  1099. if ((chip_info->com_mode != INTERRUPT_TRANSFER)
  1100. && (chip_info->com_mode != DMA_TRANSFER)
  1101. && (chip_info->com_mode != POLLING_TRANSFER)) {
  1102. dev_err(chip_info->dev,
  1103. "Communication mode is configured incorrectly\n");
  1104. return -EINVAL;
  1105. }
  1106. if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
  1107. || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
  1108. dev_err(chip_info->dev,
  1109. "RX FIFO Trigger Level is configured incorrectly\n");
  1110. return -EINVAL;
  1111. }
  1112. if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
  1113. || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
  1114. dev_err(chip_info->dev,
  1115. "TX FIFO Trigger Level is configured incorrectly\n");
  1116. return -EINVAL;
  1117. }
  1118. if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
  1119. if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE)
  1120. && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) {
  1121. dev_err(chip_info->dev,
  1122. "Clock Phase is configured incorrectly\n");
  1123. return -EINVAL;
  1124. }
  1125. if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW)
  1126. && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) {
  1127. dev_err(chip_info->dev,
  1128. "Clock Polarity is configured incorrectly\n");
  1129. return -EINVAL;
  1130. }
  1131. }
  1132. if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
  1133. if ((chip_info->ctrl_len < SSP_BITS_4)
  1134. || (chip_info->ctrl_len > SSP_BITS_32)) {
  1135. dev_err(chip_info->dev,
  1136. "CTRL LEN is configured incorrectly\n");
  1137. return -EINVAL;
  1138. }
  1139. if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
  1140. && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
  1141. dev_err(chip_info->dev,
  1142. "Wait State is configured incorrectly\n");
  1143. return -EINVAL;
  1144. }
  1145. if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
  1146. && (chip_info->duplex !=
  1147. SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
  1148. dev_err(chip_info->dev,
  1149. "DUPLEX is configured incorrectly\n");
  1150. return -EINVAL;
  1151. }
  1152. }
  1153. if (chip_info->cs_control == NULL) {
  1154. dev_warn(chip_info->dev,
  1155. "Chip Select Function is NULL for this chip\n");
  1156. chip_info->cs_control = null_cs_control;
  1157. }
  1158. return 0;
  1159. }
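/*
 * A minimal sketch (editor's illustration) of a pl022_config_chip that
 * would pass the checks above, typically supplied from board code via
 * spi_board_info.controller_data. The concrete values and the name
 * my_cs_control are assumptions, not requirements of this driver:
 *
 *	static struct pl022_config_chip dummy_chip_info = {
 *		.lbm = LOOPBACK_DISABLED,
 *		.com_mode = INTERRUPT_TRANSFER,
 *		.iface = SSP_INTERFACE_MOTOROLA_SPI,
 *		.hierarchy = SSP_MASTER,
 *		.slave_tx_disable = DO_NOT_DRIVE_TX,
 *		.endian_rx = SSP_RX_MSB,
 *		.endian_tx = SSP_TX_MSB,
 *		.data_size = SSP_DATA_BITS_12,
 *		.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
 *		.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
 *		.clk_phase = SSP_CLK_SECOND_EDGE,
 *		.clk_pol = SSP_CLK_POL_IDLE_LOW,
 *		.cs_control = my_cs_control,	// hypothetical board callback
 *	};
 *
 * Leaving clk_freq zeroed lets pl022_setup() calculate cpsdvsr/scr from
 * spi->max_speed_hz before these checks run.
 */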
  1160. /**
  1161. * pl022_transfer - transfer function registered to SPI master framework
  1162. * @spi: spi device which is requesting transfer
  1163. * @msg: spi message to be handled; it is queued onto the driver queue
  1164. *
  1165. * This function is registered to the SPI framework for this SPI master
  1166. * controller. It will queue the spi_message on the driver's queue if
  1167. * the queue is not stopped, and return.
  1168. */
  1169. static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
  1170. {
  1171. struct pl022 *pl022 = spi_master_get_devdata(spi->master);
  1172. unsigned long flags;
  1173. spin_lock_irqsave(&pl022->queue_lock, flags);
  1174. if (pl022->run == QUEUE_STOPPED) {
  1175. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1176. return -ESHUTDOWN;
  1177. }
  1178. msg->actual_length = 0;
  1179. msg->status = -EINPROGRESS;
  1180. msg->state = STATE_START;
  1181. list_add_tail(&msg->queue, &pl022->queue);
  1182. if (pl022->run == QUEUE_RUNNING && !pl022->busy)
  1183. queue_work(pl022->workqueue, &pl022->pump_messages);
  1184. spin_unlock_irqrestore(&pl022->queue_lock, flags);
  1185. return 0;
  1186. }
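/*
 * Illustrative only: client drivers never call pl022_transfer() directly;
 * they go through the SPI core, which invokes master->transfer. A minimal
 * sketch of a client queuing a message that ends up on this driver's
 * queue (spi, txbuf, rxbuf and len are assumed to exist in the caller):
 *
 *	struct spi_transfer t = {
 *		.tx_buf = txbuf,
 *		.rx_buf = rxbuf,
 *		.len = len,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	return spi_async(spi, &m);
 */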
  1187. static int calculate_effective_freq(struct pl022 *pl022,
  1188. int freq,
  1189. struct ssp_clock_params *clk_freq)
  1190. {
  1191. /* Let's calculate the frequency parameters */
  1192. u16 cpsdvsr = 2;
  1193. u16 scr = 0;
  1194. bool freq_found = false;
  1195. u32 rate;
  1196. u32 max_tclk;
  1197. u32 min_tclk;
  1198. rate = clk_get_rate(pl022->clk);
  1199. /* cpsdvsr = 2 & scr = 0 */
  1200. max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN)));
  1201. /* cpsdvsr = 254 & scr = 255 */
  1202. min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX)));
  1203. if ((freq <= max_tclk) && (freq >= min_tclk)) {
  1204. while (cpsdvsr <= CPSDVR_MAX && !freq_found) {
  1205. while (scr <= SCR_MAX && !freq_found) {
  1206. if ((rate /
  1207. (cpsdvsr * (1 + scr))) > freq)
  1208. scr += 1;
  1209. else {
  1210. /*
  1211. * This bool is made true when
  1212. * effective frequency >=
  1213. * target frequency is found
  1214. */
  1215. freq_found = true;
  1216. if ((rate /
  1217. (cpsdvsr * (1 + scr))) != freq) {
  1218. if (scr == SCR_MIN) {
  1219. cpsdvsr -= 2;
  1220. scr = SCR_MAX;
  1221. } else
  1222. scr -= 1;
  1223. }
  1224. }
  1225. }
  1226. if (!freq_found) {
  1227. cpsdvsr += 2;
  1228. scr = SCR_MIN;
  1229. }
  1230. }
  1231. if (cpsdvsr != 0) {
  1232. dev_dbg(&pl022->adev->dev,
  1233. "SSP Effective Frequency is %u\n",
  1234. (rate / (cpsdvsr * (1 + scr))));
  1235. clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF);
  1236. clk_freq->scr = (u8) (scr & 0xFF);
  1237. dev_dbg(&pl022->adev->dev,
  1238. "SSP cpsdvsr = %d, scr = %d\n",
  1239. clk_freq->cpsdvsr, clk_freq->scr);
  1240. }
  1241. } else {
  1242. dev_err(&pl022->adev->dev,
  1243. "controller data is incorrect: out of range frequency");
  1244. return -EINVAL;
  1245. }
  1246. return 0;
  1247. }
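/*
 * Worked example (the clock rate is an assumption, not from this file):
 * the effective bit rate is rate / (cpsdvsr * (1 + scr)). With a 48 MHz
 * SSP clock, asking for 1 MHz ends the search at cpsdvsr = 2, scr = 23:
 *
 *	48000000 / (2 * (1 + 23)) = 1000000 Hz
 *
 * The loop above walks cpsdvsr upwards in steps of two and scr from 0 to
 * 255 until such a pair is found.
 */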
  1248. /**
  1249. * NOT IMPLEMENTED
  1250. * process_dma_info - Processes the DMA info provided by client drivers
  1251. * @chip_info: chip info provided by client device
  1252. * @chip: Runtime state maintained by the SSP controller for each spi device
  1253. *
  1254. * This function processes and stores DMA config provided by client driver
  1255. * into the runtime state maintained by the SSP controller driver
  1256. */
  1257. static int process_dma_info(struct pl022_config_chip *chip_info,
  1258. struct chip_data *chip)
  1259. {
  1260. dev_err(chip_info->dev,
  1261. "cannot process DMA info, DMA not implemented!\n");
  1262. return -ENOTSUPP;
  1263. }
  1264. /**
  1265. * pl022_setup - setup function registered to SPI master framework
  1266. * @spi: spi device which is requesting setup
  1267. *
  1268. * This function is registered to the SPI framework for this SPI master
  1269. * controller. If this is the first time setup is called for this device,
  1270. * this function will initialize the runtime state for this chip and save
  1271. * the same in the device structure. Else it will update the runtime info
  1272. * with the updated chip info. Nothing is really being written to the
  1273. * controller hardware here; that is not done until the actual transfer
  1274. * commences.
  1275. */
  1276. /* FIXME: JUST GUESSING the spi->mode bits understood by this driver */
  1277. #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
  1278. | SPI_LSB_FIRST | SPI_LOOP)
  1279. static int pl022_setup(struct spi_device *spi)
  1280. {
  1281. struct pl022_config_chip *chip_info;
  1282. struct chip_data *chip;
  1283. int status = 0;
  1284. struct pl022 *pl022 = spi_master_get_devdata(spi->master);
  1285. if (spi->mode & ~MODEBITS) {
  1286. dev_dbg(&spi->dev, "unsupported mode bits %x\n",
  1287. spi->mode & ~MODEBITS);
  1288. return -EINVAL;
  1289. }
  1290. if (!spi->max_speed_hz)
  1291. return -EINVAL;
  1292. /* Get controller_state if one is supplied */
  1293. chip = spi_get_ctldata(spi);
  1294. if (chip == NULL) {
  1295. chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
  1296. if (!chip) {
  1297. dev_err(&spi->dev,
  1298. "cannot allocate controller state\n");
  1299. return -ENOMEM;
  1300. }
  1301. dev_dbg(&spi->dev,
  1302. "allocated memory for controller's runtime state\n");
  1303. }
  1304. /* Get controller data if one is supplied */
  1305. chip_info = spi->controller_data;
  1306. if (chip_info == NULL) {
  1307. /* spi_board_info.controller_data is not supplied */
  1308. dev_dbg(&spi->dev,
  1309. "using default controller_data settings\n");
  1310. chip_info =
  1311. kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL);
  1312. if (!chip_info) {
  1313. dev_err(&spi->dev,
  1314. "cannot allocate controller data\n");
  1315. status = -ENOMEM;
  1316. goto err_first_setup;
  1317. }
  1318. dev_dbg(&spi->dev, "allocated memory for controller data\n");
  1319. /* Pointer back to the SPI device */
  1320. chip_info->dev = &spi->dev;
  1321. /*
  1322. * Set controller data default values:
  1323. * Polling is supported by default
  1324. */
  1325. chip_info->lbm = LOOPBACK_DISABLED;
  1326. chip_info->com_mode = POLLING_TRANSFER;
  1327. chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI;
  1328. chip_info->hierarchy = SSP_SLAVE;
  1329. chip_info->slave_tx_disable = DO_NOT_DRIVE_TX;
  1330. chip_info->endian_tx = SSP_TX_LSB;
  1331. chip_info->endian_rx = SSP_RX_LSB;
  1332. chip_info->data_size = SSP_DATA_BITS_12;
  1333. chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
  1334. chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
  1335. chip_info->clk_phase = SSP_CLK_SECOND_EDGE;
  1336. chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
  1337. chip_info->ctrl_len = SSP_BITS_8;
  1338. chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
  1339. chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX;
  1340. chip_info->cs_control = null_cs_control;
  1341. } else {
  1342. dev_dbg(&spi->dev,
  1343. "using user supplied controller_data settings\n");
  1344. }
	/*
	 * Use custom clock divisors if supplied, else calculate them
	 * from the board frequency setting (spi->max_speed_hz)
	 */
	if ((0 == chip_info->clk_freq.cpsdvsr)
	    && (0 == chip_info->clk_freq.scr)) {
		status = calculate_effective_freq(pl022,
						  spi->max_speed_hz,
						  &chip_info->clk_freq);
		if (status < 0)
			goto err_config_params;
	} else {
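		/*
		 * The PL022 clock prescale divisor CPSDVSR must be an even
		 * value between 2 and 254, so round an odd board-supplied
		 * value down to the nearest even number.
		 */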
		if ((chip_info->clk_freq.cpsdvsr % 2) != 0)
			chip_info->clk_freq.cpsdvsr =
				chip_info->clk_freq.cpsdvsr - 1;
	}
	status = verify_controller_parameters(pl022, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect\n");
		goto err_config_params;
	}
	/* Now set controller state based on controller data */
	chip->xfer_type = chip_info->com_mode;
	chip->cs_control = chip_info->cs_control;

	if (chip_info->data_size <= 8) {
		dev_dbg(&spi->dev, "1 <= n <= 8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (chip_info->data_size <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		if (pl022->vendor->max_bpw >= 32) {
			dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
			chip->n_bytes = 4;
			chip->read = READING_U32;
			chip->write = WRITING_U32;
		} else {
			dev_err(&spi->dev,
				"illegal data size for this controller!\n");
			dev_err(&spi->dev,
				"a standard pl022 can only handle "
				"1 <= n <= 16 bit words\n");
			status = -EINVAL;
			goto err_config_params;
		}
	}

	/* Now initialize all register settings required for this chip */
	chip->cr0 = 0;
	chip->cr1 = 0;
	chip->dmacr = 0;
	chip->cpsr = 0;
	if ((chip_info->com_mode == DMA_TRANSFER)
	    && ((pl022->master_info)->enable_dma)) {
		chip->enable_dma = 1;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
		status = process_dma_info(chip_info, chip);
		if (status < 0)
			goto err_config_params;
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	} else {
		chip->enable_dma = 0;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	}
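
	/*
	 * Assemble the clock prescale and control register images for this
	 * chip; they are only written to the hardware when a transfer is
	 * actually set up. The resulting bit rate is the SSP input clock
	 * divided by CPSDVSR * (1 + SCR).
	 */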
	chip->cpsr = chip_info->clk_freq.cpsdvsr;
	SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0);
	SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5);
	SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
	SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
	SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
	SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16);
	SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21);
	SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
	SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
	SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
	SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
	SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4);
	SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5);
	SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6);
	SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7);
	SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10);

	/* Save controller_state */
	spi_set_ctldata(spi, chip);
	return status;
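
	/*
	 * FIXME: a chip_info allocated with default settings above is never
	 * freed, and on these error paths a chip obtained from
	 * spi_get_ctldata() is freed while the SPI device's ctldata still
	 * points at it.
	 */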
err_config_params:
err_first_setup:
	kfree(chip);
	return status;
}

/**
 * pl022_cleanup - cleanup function registered to SPI master framework
 * @spi: spi device which is requesting cleanup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. It will free the runtime state of the chip.
 */
static void pl022_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	spi_set_ctldata(spi, NULL);
	kfree(chip);
}
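
/*
 * Example: the probe routine below expects the machine code to have
 * registered an AMBA device matching "ssp-pl022" with a struct
 * pl022_ssp_controller as its platform data, roughly like the sketch
 * below ("ssp0_platform_data" is a hypothetical name and the values are
 * only illustrative):
 *
 *	static struct pl022_ssp_controller ssp0_platform_data = {
 *		.bus_id = 0,
 *		.num_chipselect = 4,
 *		.enable_dma = 0,
 *	};
 */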

static int __init
pl022_probe(struct amba_device *adev, struct amba_id *id)
{
	struct device *dev = &adev->dev;
	struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
	struct spi_master *master;
	struct pl022 *pl022 = NULL;	/* Data for this driver */
	int status = 0;

	dev_info(&adev->dev,
		 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
	if (platform_info == NULL) {
		dev_err(&adev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct pl022));
	if (master == NULL) {
		dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
		status = -ENOMEM;
		goto err_no_master;
	}

	pl022 = spi_master_get_devdata(master);
	pl022->master = master;
	pl022->master_info = platform_info;
	pl022->adev = adev;
	pl022->vendor = id->data;

	/*
	 * Bus number which has been assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = pl022_cleanup;
	master->setup = pl022_setup;
	master->transfer = pl022_transfer;
	dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);

	status = amba_request_regions(adev, NULL);
	if (status)
		goto err_no_ioregion;

	pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
	if (pl022->virtbase == NULL) {
		status = -ENOMEM;
		goto err_no_ioremap;
	}
	printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
	       adev->res.start, pl022->virtbase);

	pl022->clk = clk_get(&adev->dev, NULL);
	if (IS_ERR(pl022->clk)) {
		status = PTR_ERR(pl022->clk);
		dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
		goto err_no_clk;
	}

	/* Disable SSP */
	clk_enable(pl022->clk);
	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
	       SSP_CR1(pl022->virtbase));
	load_ssp_default_config(pl022);
	clk_disable(pl022->clk);

	status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
			     pl022);
	if (status < 0) {
		dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irq;
	}

	/* Initialize and start queue */
	status = init_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev, "probe - problem initializing queue\n");
		goto err_init_queue;
	}
	status = start_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev, "probe - problem starting queue\n");
		goto err_start_queue;
	}

	/* Register with the SPI framework */
	amba_set_drvdata(adev, pl022);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&adev->dev,
			"probe - problem registering spi master\n");
		goto err_spi_register;
	}
	dev_dbg(dev, "probe succeeded\n");
	return 0;

err_spi_register:
err_start_queue:
err_init_queue:
	destroy_queue(pl022);
	free_irq(adev->irq[0], pl022);
err_no_irq:
	clk_put(pl022->clk);
err_no_clk:
	iounmap(pl022->virtbase);
err_no_ioremap:
	amba_release_regions(adev);
err_no_ioregion:
	spi_master_put(master);
err_no_master:
err_no_pdata:
	return status;
}

static int __exit
pl022_remove(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;

	if (!pl022)
		return 0;

	/* Remove the queue */
	status = destroy_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev,
			"queue remove failed (%d)\n", status);
		return status;
	}
	load_ssp_default_config(pl022);
	free_irq(adev->irq[0], pl022);
	clk_disable(pl022->clk);
	clk_put(pl022->clk);
	iounmap(pl022->virtbase);
	amba_release_regions(adev);
	tasklet_disable(&pl022->pump_transfers);
	spi_unregister_master(pl022->master);
	spi_master_put(pl022->master);
	amba_set_drvdata(adev, NULL);
	dev_dbg(&adev->dev, "remove succeeded\n");
	return 0;
}

#ifdef CONFIG_PM
static int pl022_suspend(struct amba_device *adev, pm_message_t state)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;

	status = stop_queue(pl022);
	if (status) {
		dev_warn(&adev->dev, "suspend cannot stop queue\n");
		return status;
	}

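	/* Park the SSP in its disabled default configuration while suspended */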
	clk_enable(pl022->clk);
	load_ssp_default_config(pl022);
	clk_disable(pl022->clk);
	dev_dbg(&adev->dev, "suspended\n");
	return 0;
}

static int pl022_resume(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;

	/* Start the queue running */
	status = start_queue(pl022);
	if (status)
		dev_err(&adev->dev, "problem starting queue (%d)\n", status);
	else
		dev_dbg(&adev->dev, "resumed\n");

	return status;
}
#else
#define pl022_suspend	NULL
#define pl022_resume	NULL
#endif	/* CONFIG_PM */

static struct vendor_data vendor_arm = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
};

static struct vendor_data vendor_st = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
};

static struct amba_id pl022_ids[] = {
	{
		/*
		 * ARM PL022 variant, this has a 16bit wide
		 * and 8 locations deep TX/RX FIFO
		 */
		.id = 0x00041022,
		.mask = 0x000fffff,
		.data = &vendor_arm,
	},
	{
		/*
		 * ST Micro derivative, this has a 32bit wide
		 * and 32 locations deep TX/RX FIFO
		 */
		.id = 0x01080022,
		.mask = 0xffffffff,
		.data = &vendor_st,
	},
	{ 0, 0 },
};

static struct amba_driver pl022_driver = {
	.drv = {
		.name = "ssp-pl022",
	},
	.id_table = pl022_ids,
	.probe = pl022_probe,
	.remove = __exit_p(pl022_remove),
	.suspend = pl022_suspend,
	.resume = pl022_resume,
};

static int __init pl022_init(void)
{
	return amba_driver_register(&pl022_driver);
}

module_init(pl022_init);

static void __exit pl022_exit(void)
{
	amba_driver_unregister(&pl022_driver);
}

module_exit(pl022_exit);

MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("PL022 SSP Controller Driver");
MODULE_LICENSE("GPL");