/*
 * drivers/spi/spi_imx.c
 *
 * Copyright (C) 2006 SWAPP
 *	Andrea Paterniani <a.paterniani@swapp-eng.it>
 *
 * Initial version inspired by:
 *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>

#include <mach/hardware.h>
#include <mach/imx-dma.h>
#include <mach/spi_imx.h>
/*-------------------------------------------------------------------------*/
/* SPI Registers offsets from peripheral base address */
#define SPI_RXDATA (0x00)
#define SPI_TXDATA (0x04)
#define SPI_CONTROL (0x08)
#define SPI_INT_STATUS (0x0C)
#define SPI_TEST (0x10)
#define SPI_PERIOD (0x14)
#define SPI_DMA (0x18)
#define SPI_RESET (0x1C)

/* SPI Control Register Bit Fields & Masks */
#define SPI_CONTROL_BITCOUNT_MASK (0xF) /* Bit Count Mask */
#define SPI_CONTROL_BITCOUNT(n) (((n) - 1) & SPI_CONTROL_BITCOUNT_MASK)
#define SPI_CONTROL_POL (0x1 << 4) /* Clock Polarity Mask */
#define SPI_CONTROL_POL_ACT_HIGH (0x0 << 4) /* Active high pol. (0=idle) */
#define SPI_CONTROL_POL_ACT_LOW (0x1 << 4) /* Active low pol. (1=idle) */
#define SPI_CONTROL_PHA (0x1 << 5) /* Clock Phase Mask */
#define SPI_CONTROL_PHA_0 (0x0 << 5) /* Clock Phase 0 */
#define SPI_CONTROL_PHA_1 (0x1 << 5) /* Clock Phase 1 */
#define SPI_CONTROL_SSCTL (0x1 << 6) /* /SS Waveform Select Mask */
#define SPI_CONTROL_SSCTL_0 (0x0 << 6) /* Master: /SS stays low between SPI bursts
                                          Slave: RXFIFO advanced by BIT_COUNT */
#define SPI_CONTROL_SSCTL_1 (0x1 << 6) /* Master: /SS inserts pulse between SPI bursts
                                          Slave: RXFIFO advanced by /SS rising edge */
#define SPI_CONTROL_SSPOL (0x1 << 7) /* /SS Polarity Select Mask */
#define SPI_CONTROL_SSPOL_ACT_LOW (0x0 << 7) /* /SS Active low */
#define SPI_CONTROL_SSPOL_ACT_HIGH (0x1 << 7) /* /SS Active high */
#define SPI_CONTROL_XCH (0x1 << 8) /* Exchange */
#define SPI_CONTROL_SPIEN (0x1 << 9) /* SPI Module Enable */
#define SPI_CONTROL_MODE (0x1 << 10) /* SPI Mode Select Mask */
#define SPI_CONTROL_MODE_SLAVE (0x0 << 10) /* SPI Mode Slave */
#define SPI_CONTROL_MODE_MASTER (0x1 << 10) /* SPI Mode Master */
#define SPI_CONTROL_DRCTL (0x3 << 11) /* /SPI_RDY Control Mask */
#define SPI_CONTROL_DRCTL_0 (0x0 << 11) /* Ignore /SPI_RDY */
#define SPI_CONTROL_DRCTL_1 (0x1 << 11) /* /SPI_RDY falling edge triggers input */
#define SPI_CONTROL_DRCTL_2 (0x2 << 11) /* /SPI_RDY active low level triggers input */
#define SPI_CONTROL_DATARATE (0x7 << 13) /* Data Rate Mask */
#define SPI_PERCLK2_DIV_MIN (0) /* PERCLK2:4 */
#define SPI_PERCLK2_DIV_MAX (7) /* PERCLK2:512 */
#define SPI_CONTROL_DATARATE_MIN (SPI_PERCLK2_DIV_MAX << 13)
#define SPI_CONTROL_DATARATE_MAX (SPI_PERCLK2_DIV_MIN << 13)
#define SPI_CONTROL_DATARATE_BAD (SPI_CONTROL_DATARATE_MIN + 1)
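
/*
 * Note (added for clarity): the 3-bit DATARATE field at bits 15..13 selects
 * a PERCLK2 divider of (4 << field), i.e. /4 for field 0 up to /512 for
 * field 7.  DATARATE_MIN (field 7) is therefore the slowest rate and
 * DATARATE_MAX (field 0) the fastest; see spi_speed_hz() below for the
 * resulting bit rate.
 */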
/* SPI Interrupt/Status Register Bit Fields & Masks */
#define SPI_STATUS_TE (0x1 << 0) /* TXFIFO Empty Status */
#define SPI_STATUS_TH (0x1 << 1) /* TXFIFO Half Status */
#define SPI_STATUS_TF (0x1 << 2) /* TXFIFO Full Status */
#define SPI_STATUS_RR (0x1 << 3) /* RXFIFO Data Ready Status */
#define SPI_STATUS_RH (0x1 << 4) /* RXFIFO Half Status */
#define SPI_STATUS_RF (0x1 << 5) /* RXFIFO Full Status */
#define SPI_STATUS_RO (0x1 << 6) /* RXFIFO Overflow */
#define SPI_STATUS_BO (0x1 << 7) /* Bit Count Overflow */
#define SPI_STATUS (0xFF) /* SPI Status Mask */
#define SPI_INTEN_TE (0x1 << 8) /* TXFIFO Empty Interrupt Enable */
#define SPI_INTEN_TH (0x1 << 9) /* TXFIFO Half Interrupt Enable */
#define SPI_INTEN_TF (0x1 << 10) /* TXFIFO Full Interrupt Enable */
#define SPI_INTEN_RE (0x1 << 11) /* RXFIFO Data Ready Interrupt Enable */
#define SPI_INTEN_RH (0x1 << 12) /* RXFIFO Half Interrupt Enable */
#define SPI_INTEN_RF (0x1 << 13) /* RXFIFO Full Interrupt Enable */
#define SPI_INTEN_RO (0x1 << 14) /* RXFIFO Overflow Interrupt Enable */
#define SPI_INTEN_BO (0x1 << 15) /* Bit Count Overflow Interrupt Enable */
#define SPI_INTEN (0xFF << 8) /* SPI Interrupt Enable Mask */

/* SPI Test Register Bit Fields & Masks */
#define SPI_TEST_TXCNT (0xF << 0) /* TXFIFO Counter */
#define SPI_TEST_RXCNT_LSB (4) /* RXFIFO Counter LSB */
#define SPI_TEST_RXCNT (0xF << 4) /* RXFIFO Counter */
#define SPI_TEST_SSTATUS (0xF << 8) /* State Machine Status */
#define SPI_TEST_LBC (0x1 << 14) /* Loop Back Control */

/* SPI Period Register Bit Fields & Masks */
#define SPI_PERIOD_WAIT (0x7FFF << 0) /* Wait Between Transactions */
#define SPI_PERIOD_MAX_WAIT (0x7FFF) /* Max Wait Between Transactions */
#define SPI_PERIOD_CSRC (0x1 << 15) /* Period Clock Source Mask */
#define SPI_PERIOD_CSRC_BCLK (0x0 << 15) /* Period Clock Source is Bit Clock */
#define SPI_PERIOD_CSRC_32768 (0x1 << 15) /* Period Clock Source is 32.768 kHz Clock */

/* SPI DMA Register Bit Fields & Masks */
#define SPI_DMA_RHDMA (0x1 << 4) /* RXFIFO Half Status */
#define SPI_DMA_RFDMA (0x1 << 5) /* RXFIFO Full Status */
#define SPI_DMA_TEDMA (0x1 << 6) /* TXFIFO Empty Status */
#define SPI_DMA_THDMA (0x1 << 7) /* TXFIFO Half Status */
#define SPI_DMA_RHDEN (0x1 << 12) /* RXFIFO Half DMA Request Enable */
#define SPI_DMA_RFDEN (0x1 << 13) /* RXFIFO Full DMA Request Enable */
#define SPI_DMA_TEDEN (0x1 << 14) /* TXFIFO Empty DMA Request Enable */
#define SPI_DMA_THDEN (0x1 << 15) /* TXFIFO Half DMA Request Enable */

/* SPI Soft Reset Register Bit Fields & Masks */
#define SPI_RESET_START (0x1) /* Start */

/* Default SPI configuration values */
#define SPI_DEFAULT_CONTROL		\
(					\
	SPI_CONTROL_BITCOUNT(16) |	\
	SPI_CONTROL_POL_ACT_HIGH |	\
	SPI_CONTROL_PHA_0 |		\
	SPI_CONTROL_SPIEN |		\
	SPI_CONTROL_SSCTL_1 |		\
	SPI_CONTROL_MODE_MASTER |	\
	SPI_CONTROL_DRCTL_0 |		\
	SPI_CONTROL_DATARATE_MIN	\
)
#define SPI_DEFAULT_ENABLE_LOOPBACK (0)
#define SPI_DEFAULT_ENABLE_DMA (0)
#define SPI_DEFAULT_PERIOD_WAIT (8)
/*-------------------------------------------------------------------------*/

/*-------------------------------------------------------------------------*/
/* TX/RX SPI FIFO size */
#define SPI_FIFO_DEPTH (8)
#define SPI_FIFO_BYTE_WIDTH (2)
#define SPI_FIFO_OVERFLOW_MARGIN (2)

/* DMA burst length for half full/empty request trigger */
#define SPI_DMA_BLR (SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2)
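
/*
 * Note (added for clarity): with an 8-entry, 2-byte-wide FIFO, SPI_DMA_BLR
 * works out to 8 bytes, so one DMA burst moves exactly the half-FIFO
 * amount signalled by the RH/TH half-watermark DMA requests above.
 */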
/* Dummy char output to achieve reads.
   Choosing something different from all zeroes may help pattern recognition
   for oscilloscope analysis, but may break some drivers. */
#define SPI_DUMMY_u8 0
#define SPI_DUMMY_u16 ((SPI_DUMMY_u8 << 8) | SPI_DUMMY_u8)
#define SPI_DUMMY_u32 ((SPI_DUMMY_u16 << 16) | SPI_DUMMY_u16)
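
/*
 * Note (added for clarity): SPI_DUMMY_u32 is the value probe() preloads
 * into drv_data->dummy_dma_buf, the fixed source buffer used for dummy
 * DMA writes on read-only transfers (see map_dma_buffers()).
 */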
/**
 * Macro to change a u32 field:
 * @r : register to edit
 * @m : bit mask
 * @v : new value for the field, correctly bit-aligned
 */
#define u32_EDIT(r, m, v) r = (r & ~(m)) | (v)
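
/*
 * Illustrative use (as in pump_transfers() below): replace the data rate
 * field of a cached copy of the control register in one step:
 *
 *	control = readl(regs + SPI_CONTROL);
 *	u32_EDIT(control, SPI_CONTROL_DATARATE, SPI_CONTROL_DATARATE_MIN);
 *	writel(control, regs + SPI_CONTROL);
 *
 * Since @r is assigned to, it must be an l-value.
 */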
/* Message state */
#define START_STATE ((void *)0)
#define RUNNING_STATE ((void *)1)
#define DONE_STATE ((void *)2)
#define ERROR_STATE ((void *)-1)

/* Queue state */
#define QUEUE_RUNNING (0)
#define QUEUE_STOPPED (1)

#define IS_DMA_ALIGNED(x) (((u32)(x) & 0x03) == 0)
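
/*
 * Note (added for clarity): DMA is only attempted on buffers that start on
 * a 4-byte boundary, presumably matching the 32-bit memory-side accesses
 * (CCR_SSIZ_32/CCR_DSIZ_32) programmed in pump_transfers().
 */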
/*-------------------------------------------------------------------------*/

/*-------------------------------------------------------------------------*/
/* Driver data structs */

/* Context */
struct driver_data {
	/* Driver model hookup */
	struct platform_device *pdev;

	/* SPI framework hookup */
	struct spi_master *master;

	/* IMX hookup */
	struct spi_imx_master *master_info;

	/* Memory resources and SPI regs virtual address */
	struct resource *ioarea;
	void __iomem *regs;

	/* SPI RX_DATA physical address */
	dma_addr_t rd_data_phys;

	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct work;
	spinlock_t lock;
	struct list_head queue;
	int busy;
	int run;

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message, transfer and state */
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;

	/* Rd / Wr buffers pointers */
	size_t len;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;

	u8 rd_only;
	u8 n_bytes;
	int cs_change;

	/* Function pointers */
	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
	void (*cs_control)(u32 command);

	/* DMA setup */
	int rx_channel;
	int tx_channel;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	int rx_dma_needs_unmap;
	int tx_dma_needs_unmap;
	size_t tx_map_len;
	u32 dummy_dma_buf ____cacheline_aligned;

	struct clk *clk;
};

/* Runtime state */
struct chip_data {
	u32 control;
	u32 period;
	u32 test;

	u8 enable_dma:1;
	u8 bits_per_word;
	u8 n_bytes;
	u32 max_speed_hz;

	void (*cs_control)(u32 command);
};
/*-------------------------------------------------------------------------*/

static void pump_messages(struct work_struct *work);

static void flush(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	u32 control;

	dev_dbg(&drv_data->pdev->dev, "flush\n");

	/* Wait for end of transaction */
	do {
		control = readl(regs + SPI_CONTROL);
	} while (control & SPI_CONTROL_XCH);

	/* Release chip select if requested, transfer delays are
	   handled in pump_transfers */
	if (drv_data->cs_change)
		drv_data->cs_control(SPI_CS_DEASSERT);

	/* Disable SPI to flush FIFOs */
	writel(control & ~SPI_CONTROL_SPIEN, regs + SPI_CONTROL);
	writel(control, regs + SPI_CONTROL);
}

static void restore_state(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	struct chip_data *chip = drv_data->cur_chip;

	/* Load chip registers */
	dev_dbg(&drv_data->pdev->dev,
		"restore_state\n"
		"    test    = 0x%08X\n"
		"    control = 0x%08X\n",
		chip->test,
		chip->control);
	writel(chip->test, regs + SPI_TEST);
	writel(chip->period, regs + SPI_PERIOD);
	writel(0, regs + SPI_INT_STATUS);
	writel(chip->control, regs + SPI_CONTROL);
}

static void null_cs_control(u32 command)
{
}

static inline u32 data_to_write(struct driver_data *drv_data)
{
	return ((u32)(drv_data->tx_end - drv_data->tx)) / drv_data->n_bytes;
}

static inline u32 data_to_read(struct driver_data *drv_data)
{
	return ((u32)(drv_data->rx_end - drv_data->rx)) / drv_data->n_bytes;
}

static int write(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	void *tx = drv_data->tx;
	void *tx_end = drv_data->tx_end;
	u8 n_bytes = drv_data->n_bytes;
	u32 remaining_writes;
	u32 fifo_avail_space;
	u32 n;
	u16 d;

	/* Compute how many fifo writes to do */
	remaining_writes = (u32)(tx_end - tx) / n_bytes;
	fifo_avail_space = SPI_FIFO_DEPTH -
				(readl(regs + SPI_TEST) & SPI_TEST_TXCNT);
	if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN))
		/* Leave some margin when receiving: every word written is
		   also clocked into the RX FIFO, so this avoids an RX FIFO
		   overflow */
		fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN;

	n = min(remaining_writes, fifo_avail_space);

	dev_dbg(&drv_data->pdev->dev,
		"write type %s\n"
		"    remaining writes = %d\n"
		"    fifo avail space = %d\n"
		"    fifo writes      = %d\n",
		(n_bytes == 1) ? "u8" : "u16",
		remaining_writes,
		fifo_avail_space,
		n);

	if (n > 0) {
		/* Fill SPI TXFIFO */
		if (drv_data->rd_only) {
			tx += n * n_bytes;
			while (n--)
				writel(SPI_DUMMY_u16, regs + SPI_TXDATA);
		} else {
			if (n_bytes == 1) {
				while (n--) {
					d = *(u8 *)tx;
					writel(d, regs + SPI_TXDATA);
					tx += 1;
				}
			} else {
				while (n--) {
					d = *(u16 *)tx;
					writel(d, regs + SPI_TXDATA);
					tx += 2;
				}
			}
		}

		/* Trigger transfer */
		writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
			regs + SPI_CONTROL);

		/* Update tx pointer */
		drv_data->tx = tx;
	}

	return (tx >= tx_end);
}

static int read(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	void *rx = drv_data->rx;
	void *rx_end = drv_data->rx_end;
	u8 n_bytes = drv_data->n_bytes;
	u32 remaining_reads;
	u32 fifo_rxcnt;
	u32 n;
	u16 d;

	/* Compute how many fifo reads to do */
	remaining_reads = (u32)(rx_end - rx) / n_bytes;
	fifo_rxcnt = (readl(regs + SPI_TEST) & SPI_TEST_RXCNT) >>
			SPI_TEST_RXCNT_LSB;
	n = min(remaining_reads, fifo_rxcnt);

	dev_dbg(&drv_data->pdev->dev,
		"read type %s\n"
		"    remaining reads = %d\n"
		"    fifo rx count   = %d\n"
		"    fifo reads      = %d\n",
		(n_bytes == 1) ? "u8" : "u16",
		remaining_reads,
		fifo_rxcnt,
		n);

	if (n > 0) {
		/* Read SPI RXFIFO */
		if (n_bytes == 1) {
			while (n--) {
				d = readl(regs + SPI_RXDATA);
				*((u8 *)rx) = d;
				rx += 1;
			}
		} else {
			while (n--) {
				d = readl(regs + SPI_RXDATA);
				*((u16 *)rx) = d;
				rx += 2;
			}
		}

		/* Update rx pointer */
		drv_data->rx = rx;
	}

	return (rx >= rx_end);
}

static void *next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	}

	return DONE_STATE;
}

static int map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg;
	struct device *dev;
	void *buf;

	drv_data->rx_dma_needs_unmap = 0;
	drv_data->tx_dma_needs_unmap = 0;

	if (!drv_data->master_info->enable_dma ||
		!drv_data->cur_chip->enable_dma)
			return -1;

	msg = drv_data->cur_msg;
	dev = &msg->spi->dev;
	if (msg->is_dma_mapped) {
		if (drv_data->tx_dma)
			/* The caller provided at least dma and cpu virtual
			   addresses for write; pump_transfers() will consider
			   the transfer as write only if the cpu rx virtual
			   address is NULL */
			return 0;

		if (drv_data->rx_dma) {
			/* The caller provided dma and cpu virtual addresses
			   to perform a read only transfer -->
			   use drv_data->dummy_dma_buf for dummy writes to
			   achieve reads */
			buf = &drv_data->dummy_dma_buf;
			drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
			drv_data->tx_dma = dma_map_single(dev,
							buf,
							drv_data->tx_map_len,
							DMA_TO_DEVICE);
			if (dma_mapping_error(dev, drv_data->tx_dma))
				return -1;

			drv_data->tx_dma_needs_unmap = 1;

			/* Flags transfer as rd_only for pump_transfers() DMA
			   regs programming (should be redundant) */
			drv_data->tx = NULL;

			return 0;
		}
	}

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return -1;

	if (drv_data->tx == NULL) {
		/* Read only message --> use drv_data->dummy_dma_buf for dummy
		   writes to achieve reads */
		buf = &drv_data->dummy_dma_buf;
		drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
	} else {
		buf = drv_data->tx;
		drv_data->tx_map_len = drv_data->len;
	}
	drv_data->tx_dma = dma_map_single(dev,
					buf,
					drv_data->tx_map_len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return -1;
	drv_data->tx_dma_needs_unmap = 1;

	/* NULL rx means write-only transfer and no map needed
	   since rx DMA will not be used */
	if (drv_data->rx) {
		buf = drv_data->rx;
		drv_data->rx_dma = dma_map_single(dev,
						buf,
						drv_data->len,
						DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, drv_data->rx_dma)) {
			if (drv_data->tx_dma) {
				dma_unmap_single(dev,
						drv_data->tx_dma,
						drv_data->tx_map_len,
						DMA_TO_DEVICE);
				drv_data->tx_dma_needs_unmap = 0;
			}
			return -1;
		}
		drv_data->rx_dma_needs_unmap = 1;
	}

	return 0;
}

static void unmap_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (drv_data->rx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->rx_dma,
				drv_data->len,
				DMA_FROM_DEVICE);
		drv_data->rx_dma_needs_unmap = 0;
	}
	if (drv_data->tx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->tx_dma,
				drv_data->tx_map_len,
				DMA_TO_DEVICE);
		drv_data->tx_dma_needs_unmap = 0;
	}
}

/* Caller already set message->status (dma is already blocked) */
static void giveback(struct spi_message *message, struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;

	/* Bring SPI to sleep; restore_state() and pump_transfer()
	   will do new setup */
	writel(0, regs + SPI_INT_STATUS);
	writel(0, regs + SPI_DMA);

	/* Unconditional deselect */
	drv_data->cs_control(SPI_CS_DEASSERT);

	message->state = NULL;
	if (message->complete)
		message->complete(message->context);

	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	queue_work(drv_data->workqueue, &drv_data->work);
}

static void dma_err_handler(int channel, void *data, int errcode)
{
	struct driver_data *drv_data = data;
	struct spi_message *msg = drv_data->cur_msg;

	dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n");

	/* Disable both rx and tx dma channels */
	imx_dma_disable(drv_data->rx_channel);
	imx_dma_disable(drv_data->tx_channel);
	unmap_dma_buffers(drv_data);
	flush(drv_data);
	msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_tx_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;

	dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n");

	imx_dma_disable(channel);

	/* Now wait for TX FIFO empty */
	writel(SPI_INTEN_TE, drv_data->regs + SPI_INT_STATUS);
}

static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
	u32 status;
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;

	status = readl(regs + SPI_INT_STATUS);

	if ((status & (SPI_INTEN_RO | SPI_STATUS_RO))
			== (SPI_INTEN_RO | SPI_STATUS_RO)) {
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
		imx_dma_disable(drv_data->tx_channel);
		imx_dma_disable(drv_data->rx_channel);
		unmap_dma_buffers(drv_data);
		flush(drv_data);
		dev_warn(&drv_data->pdev->dev,
				"dma_transfer - fifo overrun\n");
		msg->state = ERROR_STATE;
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	}

	if (status & SPI_STATUS_TE) {
		writel(status & ~SPI_INTEN_TE, regs + SPI_INT_STATUS);

		if (drv_data->rx) {
			/* Wait for end of transfer before reading
			   trailing data */
			while (readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH)
				cpu_relax();

			imx_dma_disable(drv_data->rx_channel);
			unmap_dma_buffers(drv_data);

			/* Release chip select if requested, transfer delays
			   are handled in pump_transfers() */
			if (drv_data->cs_change)
				drv_data->cs_control(SPI_CS_DEASSERT);

			/* Calculate number of trailing data and read them */
			dev_dbg(&drv_data->pdev->dev,
				"dma_transfer - test = 0x%08X\n",
				readl(regs + SPI_TEST));
			drv_data->rx = drv_data->rx_end -
					((readl(regs + SPI_TEST) &
					SPI_TEST_RXCNT) >>
					SPI_TEST_RXCNT_LSB) * drv_data->n_bytes;
			read(drv_data);
		} else {
			/* Write only transfer */
			unmap_dma_buffers(drv_data);
			flush(drv_data);
		}

		/* End of transfer, update total bytes transferred */
		msg->actual_length += drv_data->len;

		/* Move to next transfer */
		msg->state = next_transfer(drv_data);

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;
	u32 status;
	irqreturn_t handled = IRQ_NONE;

	status = readl(regs + SPI_INT_STATUS);

	if (status & SPI_INTEN_TE) {
		/* TXFIFO Empty Interrupt on the last transferred word */
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
		dev_dbg(&drv_data->pdev->dev,
			"interrupt_wronly_transfer - end of tx\n");

		flush(drv_data);

		/* Update total bytes transferred */
		msg->actual_length += drv_data->len;

		/* Move to next transfer */
		msg->state = next_transfer(drv_data);

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	} else {
		while (status & SPI_STATUS_TH) {
			dev_dbg(&drv_data->pdev->dev,
				"interrupt_wronly_transfer - status = 0x%08X\n",
				status);

			/* Pump data */
			if (write(drv_data)) {
				/* End of TXFIFO writes,
				   now wait until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				return IRQ_HANDLED;
			}

			status = readl(regs + SPI_INT_STATUS);

			/* We did something */
			handled = IRQ_HANDLED;
		}
	}

	return handled;
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;
	u32 status, control;
	irqreturn_t handled = IRQ_NONE;
	unsigned long limit;

	status = readl(regs + SPI_INT_STATUS);

	if (status & SPI_INTEN_TE) {
		/* TXFIFO Empty Interrupt on the last transferred word */
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
		dev_dbg(&drv_data->pdev->dev,
			"interrupt_transfer - end of tx\n");

		if (msg->state == ERROR_STATE) {
			/* RXFIFO overrun was detected and message aborted */
			flush(drv_data);
		} else {
			/* Wait for end of transaction */
			do {
				control = readl(regs + SPI_CONTROL);
			} while (control & SPI_CONTROL_XCH);

			/* Release chip select if requested, transfer delays
			   are handled in pump_transfers */
			if (drv_data->cs_change)
				drv_data->cs_control(SPI_CS_DEASSERT);

			/* Read trailing bytes */
			limit = loops_per_jiffy << 1;
			while ((read(drv_data) == 0) && --limit)
				cpu_relax();

			if (limit == 0)
				dev_err(&drv_data->pdev->dev,
					"interrupt_transfer - "
					"trailing byte read failed\n");
			else
				dev_dbg(&drv_data->pdev->dev,
					"interrupt_transfer - end of rx\n");

			/* Update total bytes transferred */
			msg->actual_length += drv_data->len;

			/* Move to next transfer */
			msg->state = next_transfer(drv_data);
		}

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	} else {
		while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
			dev_dbg(&drv_data->pdev->dev,
				"interrupt_transfer - status = 0x%08X\n",
				status);

			if (status & SPI_STATUS_RO) {
				/* RXFIFO overrun, abort message and wait
				   until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				dev_warn(&drv_data->pdev->dev,
					"interrupt_transfer - fifo overrun\n"
					"    data not yet written = %d\n"
					"    data not yet read    = %d\n",
					data_to_write(drv_data),
					data_to_read(drv_data));

				msg->state = ERROR_STATE;

				return IRQ_HANDLED;
			}

			/* Pump data */
			read(drv_data);
			if (write(drv_data)) {
				/* End of TXFIFO writes,
				   now wait until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				return IRQ_HANDLED;
			}

			status = readl(regs + SPI_INT_STATUS);

			/* We did something */
			handled = IRQ_HANDLED;
		}
	}

	return handled;
}

static irqreturn_t spi_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = (struct driver_data *)dev_id;

	if (!drv_data->cur_msg) {
		dev_err(&drv_data->pdev->dev,
			"spi_int - bad message state\n");
		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

static inline u32 spi_speed_hz(struct driver_data *drv_data, u32 data_rate)
{
	return clk_get_rate(drv_data->clk) / (4 << ((data_rate) >> 13));
}

static u32 spi_data_rate(struct driver_data *drv_data, u32 speed_hz)
{
	u32 div;
	u32 quantized_hz = clk_get_rate(drv_data->clk) >> 2;

	for (div = SPI_PERCLK2_DIV_MIN;
		div <= SPI_PERCLK2_DIV_MAX;
		div++, quantized_hz >>= 1) {
			if (quantized_hz <= speed_hz)
				/* Max available speed less than or equal to
				   the requested speed */
				return div << 13;
	}
	return SPI_CONTROL_DATARATE_BAD;
}
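
/*
 * Worked example (illustrative, assuming a 96 MHz perclk2): the candidate
 * rates walked by spi_data_rate() run from 96 MHz / 4 = 24 MHz down to
 * 96 MHz / 512 = 187.5 kHz, halving each step.  A request for 1 MHz
 * quantizes down to 750 kHz (divider field 5); a request below 187.5 kHz
 * cannot be met and yields SPI_CONTROL_DATARATE_BAD.
 */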

static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message;
	struct spi_transfer *transfer, *previous;
	struct chip_data *chip;
	void __iomem *regs;
	u32 tmp, control;

	dev_dbg(&drv_data->pdev->dev, "pump_transfer\n");

	message = drv_data->cur_msg;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(message, drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(message, drv_data);
		return;
	}

	chip = drv_data->cur_chip;

	/* Delay if requested at end of transfer */
	transfer = drv_data->cur_transfer;
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	} else {
		/* START_STATE */
		message->state = RUNNING_STATE;
		drv_data->cs_control = chip->cs_control;
	}

	transfer = drv_data->cur_transfer;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len;
	drv_data->cs_change = transfer->cs_change;
	drv_data->rd_only = (drv_data->tx == NULL);

	regs = drv_data->regs;
	control = readl(regs + SPI_CONTROL);

	/* Bits per word setup */
	tmp = transfer->bits_per_word;
	if (tmp == 0) {
		/* Use device setup */
		tmp = chip->bits_per_word;
		drv_data->n_bytes = chip->n_bytes;
	} else
		/* Use per-transfer setup */
		drv_data->n_bytes = (tmp <= 8) ? 1 : 2;
	u32_EDIT(control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);

	/* Speed setup (surely valid because already checked) */
	tmp = transfer->speed_hz;
	if (tmp == 0)
		tmp = chip->max_speed_hz;
	tmp = spi_data_rate(drv_data, tmp);
	u32_EDIT(control, SPI_CONTROL_DATARATE, tmp);

	writel(control, regs + SPI_CONTROL);

	/* Assert device chip-select */
	drv_data->cs_control(SPI_CS_ASSERT);

	/* DMA cannot read/write SPI FIFOs other than 16 bits at a time; hence
	   if bits_per_word is less than or equal to 8, PIO transfers are
	   performed.  Moreover, DMA is only convenient for transfer lengths
	   larger than the FIFO size in bytes. */
	if ((drv_data->n_bytes == 2) &&
		(drv_data->len > SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH) &&
		(map_dma_buffers(drv_data) == 0)) {
		dev_dbg(&drv_data->pdev->dev,
			"pump dma transfer\n"
			"    tx     = %p\n"
			"    tx_dma = %08X\n"
			"    rx     = %p\n"
			"    rx_dma = %08X\n"
			"    len    = %d\n",
			drv_data->tx,
			(unsigned int)drv_data->tx_dma,
			drv_data->rx,
			(unsigned int)drv_data->rx_dma,
			drv_data->len);

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = dma_transfer;

		/* Trigger transfer */
		writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
			regs + SPI_CONTROL);

		/* Setup tx DMA */
		if (drv_data->tx)
			/* Linear source address */
			CCR(drv_data->tx_channel) =
				CCR_DMOD_FIFO |
				CCR_SMOD_LINEAR |
				CCR_SSIZ_32 | CCR_DSIZ_16 |
				CCR_REN;
		else
			/* Read only transfer -> fixed source address for
			   dummy write to achieve read */
			CCR(drv_data->tx_channel) =
				CCR_DMOD_FIFO |
				CCR_SMOD_FIFO |
				CCR_SSIZ_32 | CCR_DSIZ_16 |
				CCR_REN;

		imx_dma_setup_single(
			drv_data->tx_channel,
			drv_data->tx_dma,
			drv_data->len,
			drv_data->rd_data_phys + 4,
			DMA_MODE_WRITE);

		if (drv_data->rx) {
			/* Setup rx DMA for linear destination address */
			CCR(drv_data->rx_channel) =
				CCR_DMOD_LINEAR |
				CCR_SMOD_FIFO |
				CCR_DSIZ_32 | CCR_SSIZ_16 |
				CCR_REN;
			imx_dma_setup_single(
				drv_data->rx_channel,
				drv_data->rx_dma,
				drv_data->len,
				drv_data->rd_data_phys,
				DMA_MODE_READ);
			imx_dma_enable(drv_data->rx_channel);

			/* Enable SPI interrupt */
			writel(SPI_INTEN_RO, regs + SPI_INT_STATUS);

			/* Set SPI to request DMA service on both
			   Rx and Tx half fifo watermark */
			writel(SPI_DMA_RHDEN | SPI_DMA_THDEN, regs + SPI_DMA);
		} else
			/* Write only access -> set SPI to request DMA
			   service on Tx half fifo watermark */
			writel(SPI_DMA_THDEN, regs + SPI_DMA);

		imx_dma_enable(drv_data->tx_channel);
	} else {
		dev_dbg(&drv_data->pdev->dev,
			"pump pio transfer\n"
			"    tx  = %p\n"
			"    rx  = %p\n"
			"    len = %d\n",
			drv_data->tx,
			drv_data->rx,
			drv_data->len);

		/* Ensure we have the correct interrupt handler */
		if (drv_data->rx)
			drv_data->transfer_handler = interrupt_transfer;
		else
			drv_data->transfer_handler = interrupt_wronly_transfer;

		/* Enable SPI interrupt */
		if (drv_data->rx)
			writel(SPI_INTEN_TH | SPI_INTEN_RO,
				regs + SPI_INT_STATUS);
		else
			writel(SPI_INTEN_TH, regs + SPI_INT_STATUS);
	}
}

static void pump_messages(struct work_struct *work)
{
	struct driver_data *drv_data =
		container_of(work, struct driver_data, work);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
		drv_data->busy = 0;
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Extract head of queue */
	drv_data->cur_msg = list_entry(drv_data->queue.next,
					struct spi_message, queue);
	list_del_init(&drv_data->cur_msg->queue);
	drv_data->busy = 1;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* Setup the SPI using the per chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
	restore_state(drv_data);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
}

static int transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	u32 min_speed_hz, max_speed_hz, tmp;
	struct spi_transfer *trans;
	unsigned long flags;

	msg->actual_length = 0;

	/* Per transfer setup check */
	min_speed_hz = spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN);
	max_speed_hz = spi->max_speed_hz;
	list_for_each_entry(trans, &msg->transfers, transfer_list) {
		tmp = trans->bits_per_word;
		if (tmp > 16) {
			dev_err(&drv_data->pdev->dev,
				"message rejected : "
				"invalid transfer bits_per_word (%d bits)\n",
				tmp);
			goto msg_rejected;
		}
		tmp = trans->speed_hz;
		if (tmp) {
			if (tmp < min_speed_hz) {
				dev_err(&drv_data->pdev->dev,
					"message rejected : "
					"device min speed (%d Hz) exceeds "
					"required transfer speed (%d Hz)\n",
					min_speed_hz,
					tmp);
				goto msg_rejected;
			} else if (tmp > max_speed_hz) {
				dev_err(&drv_data->pdev->dev,
					"message rejected : "
					"transfer speed (%d Hz) exceeds "
					"device max speed (%d Hz)\n",
					tmp,
					max_speed_hz);
				goto msg_rejected;
			}
		}
	}

	/* Message accepted */
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	spin_lock_irqsave(&drv_data->lock, flags);
	if (drv_data->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -ESHUTDOWN;
	}

	list_add_tail(&msg->queue, &drv_data->queue);
	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->work);
	spin_unlock_irqrestore(&drv_data->lock, flags);

	return 0;

msg_rejected:
	/* Message rejected and not queued */
	msg->status = -EINVAL;
	msg->state = ERROR_STATE;
	if (msg->complete)
		msg->complete(msg->context);
	return -EINVAL;
}
  984. /* the spi->mode bits understood by this driver: */
  985. #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
  986. /* On first setup bad values must free chip_data memory since will cause
  987. spi_new_device to fail. Bad value setup from protocol driver are simply not
  988. applied and notified to the calling driver. */
static int setup(struct spi_device *spi)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	struct spi_imx_chip *chip_info;
	struct chip_data *chip;
	int first_setup = 0;
	u32 tmp;
	int status = 0;

	if (spi->mode & ~MODEBITS) {
		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
			spi->mode & ~MODEBITS);
		return -EINVAL;
	}

	/* Get controller data */
	chip_info = spi->controller_data;

	/* Get controller_state */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		first_setup = 1;

		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"setup - cannot allocate controller state\n");
			return -ENOMEM;
		}
		chip->control = SPI_DEFAULT_CONTROL;

		if (chip_info == NULL) {
			/* spi_board_info.controller_data is not supplied */
			chip_info = kzalloc(sizeof(struct spi_imx_chip),
						GFP_KERNEL);
			if (!chip_info) {
				dev_err(&spi->dev,
					"setup - "
					"cannot allocate controller data\n");
				status = -ENOMEM;
				goto err_first_setup;
			}
			/* Set controller data default value */
			chip_info->enable_loopback =
						SPI_DEFAULT_ENABLE_LOOPBACK;
			chip_info->enable_dma = SPI_DEFAULT_ENABLE_DMA;
			chip_info->ins_ss_pulse = 1;
			chip_info->bclk_wait = SPI_DEFAULT_PERIOD_WAIT;
			chip_info->cs_control = null_cs_control;
		}
	}

	/* Now set controller state based on controller data */

	if (first_setup) {
		/* SPI loopback */
		if (chip_info->enable_loopback)
			chip->test = SPI_TEST_LBC;
		else
			chip->test = 0;

		/* SPI dma driven */
		chip->enable_dma = chip_info->enable_dma;

		/* SPI /SS pulse between spi bursts */
		if (chip_info->ins_ss_pulse)
			u32_EDIT(chip->control,
				SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_1);
		else
			u32_EDIT(chip->control,
				SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_0);

		/* SPI bclk waits between each bits_per_word spi burst */
		if (chip_info->bclk_wait > SPI_PERIOD_MAX_WAIT) {
			dev_err(&spi->dev,
				"setup - "
				"bclk_wait exceeds max allowed (%d)\n",
				SPI_PERIOD_MAX_WAIT);
			status = -EINVAL;
			goto err_first_setup;
		}
		chip->period = SPI_PERIOD_CSRC_BCLK |
				(chip_info->bclk_wait & SPI_PERIOD_WAIT);
	}

	/* SPI mode */
	tmp = spi->mode;
	if (tmp & SPI_CS_HIGH) {
		u32_EDIT(chip->control,
			SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
	}
	switch (tmp & SPI_MODE_3) {
	case SPI_MODE_0:
		tmp = 0;
		break;
	case SPI_MODE_1:
		tmp = SPI_CONTROL_PHA_1;
		break;
	case SPI_MODE_2:
		tmp = SPI_CONTROL_POL_ACT_LOW;
		break;
	default:
		/* SPI_MODE_3 */
		tmp = SPI_CONTROL_PHA_1 | SPI_CONTROL_POL_ACT_LOW;
		break;
	}
	u32_EDIT(chip->control, SPI_CONTROL_POL | SPI_CONTROL_PHA, tmp);

	/* SPI word width */
	tmp = spi->bits_per_word;
	if (tmp == 0) {
		tmp = 8;
		spi->bits_per_word = 8;
	} else if (tmp > 16) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"setup - "
			"invalid bits_per_word (%d)\n",
			tmp);
		if (first_setup)
			goto err_first_setup;
		else {
			/* Undo setup using chip as backup copy */
			tmp = chip->bits_per_word;
			spi->bits_per_word = tmp;
		}
	}
	chip->bits_per_word = tmp;
	u32_EDIT(chip->control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
	chip->n_bytes = (tmp <= 8) ? 1 : 2;

	/* SPI datarate */
	tmp = spi_data_rate(drv_data, spi->max_speed_hz);
	if (tmp == SPI_CONTROL_DATARATE_BAD) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"setup - "
			"HW min speed (%d Hz) exceeds required "
			"max speed (%d Hz)\n",
			spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
			spi->max_speed_hz);
		if (first_setup)
			goto err_first_setup;
		else
			/* Undo setup using chip as backup copy */
			spi->max_speed_hz = chip->max_speed_hz;
	} else {
		u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp);
		/* Actual rounded max_speed_hz */
		tmp = spi_speed_hz(drv_data, tmp);
		spi->max_speed_hz = tmp;
		chip->max_speed_hz = tmp;
	}

	/* SPI chip-select management */
	if (chip_info->cs_control)
		chip->cs_control = chip_info->cs_control;
	else
		chip->cs_control = null_cs_control;

	/* Save controller_state */
	spi_set_ctldata(spi, chip);

	/* Summary */
	dev_dbg(&spi->dev,
		"setup succeeded\n"
		"    loopback enable   = %s\n"
		"    dma enable        = %s\n"
		"    insert /ss pulse  = %s\n"
		"    period wait       = %d\n"
		"    mode              = %d\n"
		"    bits per word     = %d\n"
		"    min speed         = %d Hz\n"
		"    rounded max speed = %d Hz\n",
		chip->test & SPI_TEST_LBC ? "Yes" : "No",
		chip->enable_dma ? "Yes" : "No",
		chip->control & SPI_CONTROL_SSCTL ? "Yes" : "No",
		chip->period & SPI_PERIOD_WAIT,
		spi->mode,
		spi->bits_per_word,
		spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
		spi->max_speed_hz);
	return status;

err_first_setup:
	kfree(chip);
	return status;
}

static void cleanup(struct spi_device *spi)
{
	kfree(spi_get_ctldata(spi));
}

static int __init init_queue(struct driver_data *drv_data)
{
	INIT_LIST_HEAD(&drv_data->queue);
	spin_lock_init(&drv_data->lock);

	drv_data->run = QUEUE_STOPPED;
	drv_data->busy = 0;

	tasklet_init(&drv_data->pump_transfers,
			pump_transfers, (unsigned long)drv_data);

	INIT_WORK(&drv_data->work, pump_messages);
	drv_data->workqueue = create_singlethread_workqueue(
				dev_name(drv_data->master->dev.parent));
	if (drv_data->workqueue == NULL)
		return -EBUSY;

	return 0;
}

static int start_queue(struct driver_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -EBUSY;
	}

	drv_data->run = QUEUE_RUNNING;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	queue_work(drv_data->workqueue, &drv_data->work);

	return 0;
}

static int stop_queue(struct driver_data *drv_data)
{
	unsigned long flags;
	unsigned limit = 500;
	int status = 0;

	spin_lock_irqsave(&drv_data->lock, flags);

	/* This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the drv_data->busy could be used, but then the
	 * common execution path (pump_messages) would be required to call
	 * wake_up or friends on every SPI message. Do this instead */
	drv_data->run = QUEUE_STOPPED;
	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		msleep(10);
		spin_lock_irqsave(&drv_data->lock, flags);
	}

	if (!list_empty(&drv_data->queue) || drv_data->busy)
		status = -EBUSY;

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return status;
}

static int destroy_queue(struct driver_data *drv_data)
{
	int status;

	status = stop_queue(drv_data);
	if (status != 0)
		return status;

	if (drv_data->workqueue)
		destroy_workqueue(drv_data->workqueue);

	return 0;
}

static int __init spi_imx_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_imx_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct resource *res;
	int irq, status = 0;

	platform_info = dev->platform_data;
	if (platform_info == NULL) {
		dev_err(&pdev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for drv_data */
	master = spi_alloc_master(dev, sizeof(struct driver_data));
	if (!master) {
		dev_err(&pdev->dev, "probe - cannot alloc spi_master\n");
		status = -ENOMEM;
		goto err_no_mem;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;

	master->bus_num = pdev->id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer = transfer;

	drv_data->dummy_dma_buf = SPI_DUMMY_u32;

	drv_data->clk = clk_get(&pdev->dev, "perclk2");
	if (IS_ERR(drv_data->clk)) {
		dev_err(&pdev->dev, "probe - cannot get clock\n");
		status = PTR_ERR(drv_data->clk);
		goto err_no_clk;
	}
	clk_enable(drv_data->clk);

	/* Find and map resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "probe - MEM resources not defined\n");
		status = -ENODEV;
		goto err_no_iores;
	}
	drv_data->ioarea = request_mem_region(res->start,
						res->end - res->start + 1,
						pdev->name);
	if (drv_data->ioarea == NULL) {
		dev_err(&pdev->dev, "probe - cannot reserve region\n");
		status = -ENXIO;
		goto err_no_iores;
	}
	drv_data->regs = ioremap(res->start, res->end - res->start + 1);
	if (drv_data->regs == NULL) {
		dev_err(&pdev->dev, "probe - cannot map IO\n");
		status = -ENXIO;
		goto err_no_iomap;
	}
	drv_data->rd_data_phys = (dma_addr_t)res->start;

	/* Attach to IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "probe - IRQ resource not defined\n");
		status = -ENODEV;
		goto err_no_irqres;
	}
	status = request_irq(irq, spi_int, IRQF_DISABLED,
				dev_name(dev), drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irqres;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {
		/* Get rx DMA channel */
		drv_data->rx_channel = imx_dma_request_by_prio("spi_imx_rx",
								DMA_PRIO_HIGH);
		if (drv_data->rx_channel < 0) {
			dev_err(dev,
				"probe - problem (%d) requesting rx channel\n",
				drv_data->rx_channel);
			status = drv_data->rx_channel;
			goto err_no_rxdma;
		} else
			imx_dma_setup_handlers(drv_data->rx_channel, NULL,
						dma_err_handler, drv_data);

		/* Get tx DMA channel */
		drv_data->tx_channel = imx_dma_request_by_prio("spi_imx_tx",
							       DMA_PRIO_MEDIUM);
		if (drv_data->tx_channel < 0) {
			dev_err(dev,
				"probe - problem (%d) requesting tx channel\n",
				drv_data->tx_channel);
			status = drv_data->tx_channel;
			imx_dma_free(drv_data->rx_channel);
			goto err_no_txdma;
		} else
			imx_dma_setup_handlers(drv_data->tx_channel,
						dma_tx_handler, dma_err_handler,
						drv_data);

		/* Set request source and burst length for allocated channels */
		switch (drv_data->pdev->id) {
		case 1:
			/* Using SPI1 */
			RSSR(drv_data->rx_channel) = DMA_REQ_SPI1_R;
			RSSR(drv_data->tx_channel) = DMA_REQ_SPI1_T;
			break;
		case 2:
			/* Using SPI2 */
			RSSR(drv_data->rx_channel) = DMA_REQ_SPI2_R;
			RSSR(drv_data->tx_channel) = DMA_REQ_SPI2_T;
			break;
		default:
			dev_err(dev, "probe - bad SPI Id\n");
			imx_dma_free(drv_data->rx_channel);
			imx_dma_free(drv_data->tx_channel);
			status = -ENODEV;
			goto err_no_devid;
		}
		BLR(drv_data->rx_channel) = SPI_DMA_BLR;
		BLR(drv_data->tx_channel) = SPI_DMA_BLR;
	}

	/* Load default SPI configuration */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);
	writel(SPI_DEFAULT_CONTROL, drv_data->regs + SPI_CONTROL);

	/* Initialize and start queue */
	status = init_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem initializing queue\n");
		goto err_init_queue;
	}
	status = start_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem starting queue\n");
		goto err_start_queue;
	}

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem registering spi master\n");
		goto err_spi_register;
	}

	dev_dbg(dev, "probe succeeded\n");
	return 0;

err_init_queue:
err_start_queue:
err_spi_register:
	destroy_queue(drv_data);

err_no_rxdma:
err_no_txdma:
err_no_devid:
	free_irq(irq, drv_data);

err_no_irqres:
	iounmap(drv_data->regs);

err_no_iomap:
	release_resource(drv_data->ioarea);
	kfree(drv_data->ioarea);

err_no_iores:
	clk_disable(drv_data->clk);
	clk_put(drv_data->clk);

err_no_clk:
	spi_master_put(master);

err_no_pdata:
err_no_mem:
	return status;
}

static int __exit spi_imx_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int irq;
	int status = 0;

	if (!drv_data)
		return 0;

	tasklet_kill(&drv_data->pump_transfers);

	/* Remove the queue */
	status = destroy_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "queue remove failed (%d)\n", status);
		return status;
	}

	/* Reset SPI */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);

	/* Release DMA */
	if (drv_data->master_info->enable_dma) {
		RSSR(drv_data->rx_channel) = 0;
		RSSR(drv_data->tx_channel) = 0;
		imx_dma_free(drv_data->tx_channel);
		imx_dma_free(drv_data->rx_channel);
	}

	/* Release IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, drv_data);

	clk_disable(drv_data->clk);
	clk_put(drv_data->clk);

	/* Release map resources */
	iounmap(drv_data->regs);
	release_resource(drv_data->ioarea);
	kfree(drv_data->ioarea);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);
	spi_master_put(drv_data->master);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "remove succeeded\n");

	return 0;
}

static void spi_imx_shutdown(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);

	/* Reset SPI */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);

	dev_dbg(&pdev->dev, "shutdown succeeded\n");
}

#ifdef CONFIG_PM
static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	status = stop_queue(drv_data);
	if (status != 0) {
		dev_warn(&pdev->dev, "suspend cannot stop queue\n");
		return status;
	}

	dev_dbg(&pdev->dev, "suspended\n");

	return 0;
}

static int spi_imx_resume(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	/* Start the queue running */
	status = start_queue(drv_data);
	if (status != 0)
		dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
	else
		dev_dbg(&pdev->dev, "resumed\n");

	return status;
}
#else
#define spi_imx_suspend NULL
#define spi_imx_resume NULL
#endif /* CONFIG_PM */

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:spi_imx");

static struct platform_driver driver = {
	.driver = {
		.name = "spi_imx",
		.owner = THIS_MODULE,
	},
	.remove = __exit_p(spi_imx_remove),
	.shutdown = spi_imx_shutdown,
	.suspend = spi_imx_suspend,
	.resume = spi_imx_resume,
};

static int __init spi_imx_init(void)
{
	return platform_driver_probe(&driver, spi_imx_probe);
}
module_init(spi_imx_init);

static void __exit spi_imx_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(spi_imx_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("iMX SPI Controller Driver");
MODULE_LICENSE("GPL");