/*
 * drivers/spi/spi_imx.c
 *
 * Copyright (C) 2006 SWAPP
 *	Andrea Paterniani <a.paterniani@swapp-eng.it>
 *
 * Initial version inspired by:
 *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/hardware.h>
#include <asm/delay.h>

#include <asm/arch/hardware.h>
#include <asm/arch/imx-dma.h>
#include <asm/arch/spi_imx.h>
/*-------------------------------------------------------------------------*/
/* SPI Registers offsets from peripheral base address */
#define SPI_RXDATA (0x00)
#define SPI_TXDATA (0x04)
#define SPI_CONTROL (0x08)
#define SPI_INT_STATUS (0x0C)
#define SPI_TEST (0x10)
#define SPI_PERIOD (0x14)
#define SPI_DMA (0x18)
#define SPI_RESET (0x1C)

/* SPI Control Register Bit Fields & Masks */
#define SPI_CONTROL_BITCOUNT_MASK (0xF) /* Bit Count Mask */
#define SPI_CONTROL_BITCOUNT(n) (((n) - 1) & SPI_CONTROL_BITCOUNT_MASK)
#define SPI_CONTROL_POL (0x1 << 4) /* Clock Polarity Mask */
#define SPI_CONTROL_POL_ACT_HIGH (0x0 << 4) /* Active high pol. (0=idle) */
#define SPI_CONTROL_POL_ACT_LOW (0x1 << 4) /* Active low pol. (1=idle) */
#define SPI_CONTROL_PHA (0x1 << 5) /* Clock Phase Mask */
#define SPI_CONTROL_PHA_0 (0x0 << 5) /* Clock Phase 0 */
#define SPI_CONTROL_PHA_1 (0x1 << 5) /* Clock Phase 1 */
#define SPI_CONTROL_SSCTL (0x1 << 6) /* /SS Waveform Select Mask */
#define SPI_CONTROL_SSCTL_0 (0x0 << 6) /* Master: /SS stays low between SPI bursts
                                          Slave: RXFIFO advanced by BIT_COUNT */
#define SPI_CONTROL_SSCTL_1 (0x1 << 6) /* Master: /SS insert pulse between SPI bursts
                                          Slave: RXFIFO advanced by /SS rising edge */
#define SPI_CONTROL_SSPOL (0x1 << 7) /* /SS Polarity Select Mask */
#define SPI_CONTROL_SSPOL_ACT_LOW (0x0 << 7) /* /SS Active low */
#define SPI_CONTROL_SSPOL_ACT_HIGH (0x1 << 7) /* /SS Active high */
#define SPI_CONTROL_XCH (0x1 << 8) /* Exchange */
#define SPI_CONTROL_SPIEN (0x1 << 9) /* SPI Module Enable */
#define SPI_CONTROL_MODE (0x1 << 10) /* SPI Mode Select Mask */
#define SPI_CONTROL_MODE_SLAVE (0x0 << 10) /* SPI Mode Slave */
#define SPI_CONTROL_MODE_MASTER (0x1 << 10) /* SPI Mode Master */
#define SPI_CONTROL_DRCTL (0x3 << 11) /* /SPI_RDY Control Mask */
#define SPI_CONTROL_DRCTL_0 (0x0 << 11) /* Ignore /SPI_RDY */
#define SPI_CONTROL_DRCTL_1 (0x1 << 11) /* /SPI_RDY falling edge triggers input */
#define SPI_CONTROL_DRCTL_2 (0x2 << 11) /* /SPI_RDY active low level triggers input */
#define SPI_CONTROL_DATARATE (0x7 << 13) /* Data Rate Mask */
#define SPI_PERCLK2_DIV_MIN (0) /* PERCLK2:4 */
#define SPI_PERCLK2_DIV_MAX (7) /* PERCLK2:512 */
#define SPI_CONTROL_DATARATE_MIN (SPI_PERCLK2_DIV_MAX << 13)
#define SPI_CONTROL_DATARATE_MAX (SPI_PERCLK2_DIV_MIN << 13)
#define SPI_CONTROL_DATARATE_BAD (SPI_CONTROL_DATARATE_MIN + 1)

/* SPI Interrupt/Status Register Bit Fields & Masks */
#define SPI_STATUS_TE (0x1 << 0) /* TXFIFO Empty Status */
#define SPI_STATUS_TH (0x1 << 1) /* TXFIFO Half Status */
#define SPI_STATUS_TF (0x1 << 2) /* TXFIFO Full Status */
#define SPI_STATUS_RR (0x1 << 3) /* RXFIFO Data Ready Status */
#define SPI_STATUS_RH (0x1 << 4) /* RXFIFO Half Status */
#define SPI_STATUS_RF (0x1 << 5) /* RXFIFO Full Status */
#define SPI_STATUS_RO (0x1 << 6) /* RXFIFO Overflow */
#define SPI_STATUS_BO (0x1 << 7) /* Bit Count Overflow */
#define SPI_STATUS (0xFF) /* SPI Status Mask */
#define SPI_INTEN_TE (0x1 << 8) /* TXFIFO Empty Interrupt Enable */
#define SPI_INTEN_TH (0x1 << 9) /* TXFIFO Half Interrupt Enable */
#define SPI_INTEN_TF (0x1 << 10) /* TXFIFO Full Interrupt Enable */
#define SPI_INTEN_RE (0x1 << 11) /* RXFIFO Data Ready Interrupt Enable */
#define SPI_INTEN_RH (0x1 << 12) /* RXFIFO Half Interrupt Enable */
#define SPI_INTEN_RF (0x1 << 13) /* RXFIFO Full Interrupt Enable */
#define SPI_INTEN_RO (0x1 << 14) /* RXFIFO Overflow Interrupt Enable */
#define SPI_INTEN_BO (0x1 << 15) /* Bit Count Overflow Interrupt Enable */
#define SPI_INTEN (0xFF << 8) /* SPI Interrupt Enable Mask */

/* SPI Test Register Bit Fields & Masks */
#define SPI_TEST_TXCNT (0xF << 0) /* TXFIFO Counter */
#define SPI_TEST_RXCNT_LSB (4) /* RXFIFO Counter LSB */
#define SPI_TEST_RXCNT (0xF << 4) /* RXFIFO Counter */
#define SPI_TEST_SSTATUS (0xF << 8) /* State Machine Status */
#define SPI_TEST_LBC (0x1 << 14) /* Loop Back Control */

/* SPI Period Register Bit Fields & Masks */
#define SPI_PERIOD_WAIT (0x7FFF << 0) /* Wait Between Transactions */
#define SPI_PERIOD_MAX_WAIT (0x7FFF) /* Max Wait Between Transactions */
#define SPI_PERIOD_CSRC (0x1 << 15) /* Period Clock Source Mask */
#define SPI_PERIOD_CSRC_BCLK (0x0 << 15) /* Period Clock Source is Bit Clock */
#define SPI_PERIOD_CSRC_32768 (0x1 << 15) /* Period Clock Source is 32.768 KHz Clock */

/* SPI DMA Register Bit Fields & Masks */
#define SPI_DMA_RHDMA (0xF << 4) /* RXFIFO Half Status */
#define SPI_DMA_RFDMA (0x1 << 5) /* RXFIFO Full Status */
#define SPI_DMA_TEDMA (0x1 << 6) /* TXFIFO Empty Status */
#define SPI_DMA_THDMA (0x1 << 7) /* TXFIFO Half Status */
#define SPI_DMA_RHDEN (0x1 << 12) /* RXFIFO Half DMA Request Enable */
#define SPI_DMA_RFDEN (0x1 << 13) /* RXFIFO Full DMA Request Enable */
#define SPI_DMA_TEDEN (0x1 << 14) /* TXFIFO Empty DMA Request Enable */
#define SPI_DMA_THDEN (0x1 << 15) /* TXFIFO Half DMA Request Enable */

/* SPI Soft Reset Register Bit Fields & Masks */
#define SPI_RESET_START (0x1) /* Start */

/* Default SPI configuration values */
#define SPI_DEFAULT_CONTROL            \
(                                      \
    SPI_CONTROL_BITCOUNT(16) |         \
    SPI_CONTROL_POL_ACT_HIGH |         \
    SPI_CONTROL_PHA_0 |                \
    SPI_CONTROL_SPIEN |                \
    SPI_CONTROL_SSCTL_1 |              \
    SPI_CONTROL_MODE_MASTER |          \
    SPI_CONTROL_DRCTL_0 |              \
    SPI_CONTROL_DATARATE_MIN           \
)
#define SPI_DEFAULT_ENABLE_LOOPBACK (0)
#define SPI_DEFAULT_ENABLE_DMA (0)
#define SPI_DEFAULT_PERIOD_WAIT (8)
/*-------------------------------------------------------------------------*/


/*-------------------------------------------------------------------------*/
/* TX/RX SPI FIFO size */
#define SPI_FIFO_DEPTH (8)
#define SPI_FIFO_BYTE_WIDTH (2)
#define SPI_FIFO_OVERFLOW_MARGIN (2)

/* DMA burst length for half full/empty request trigger */
#define SPI_DMA_BLR (SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2)
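/* With the 8-entry, 16-bit-wide FIFO above, this works out to
   8 * 2 / 2 = 8 bytes per DMA burst, i.e. half the FIFO, matching the
   half-full/half-empty request watermarks (SPI_DMA_RHDEN/SPI_DMA_THDEN)
   programmed in pump_transfers(). */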
/* Dummy char output to achieve reads.
   Choosing something different from all zeroes may help pattern recognition
   for oscilloscope analysis, but may break some drivers. */
#define SPI_DUMMY_u8 0
#define SPI_DUMMY_u16 ((SPI_DUMMY_u8 << 8) | SPI_DUMMY_u8)
#define SPI_DUMMY_u32 ((SPI_DUMMY_u16 << 16) | SPI_DUMMY_u16)

/**
 * Macro to change a u32 field:
 * @r : register to edit
 * @m : bit mask
 * @v : new value for the field correctly bit-aligned
 */
#define u32_EDIT(r, m, v) r = (r & ~(m)) | (v)
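/* For example, u32_EDIT(control, SPI_CONTROL_DATARATE, SPI_CONTROL_DATARATE_MIN)
   clears the three data-rate bits of 'control' and sets them to the slowest
   rate; this is exactly how pump_transfers() and setup() use it below. */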
/* Message state */
#define START_STATE ((void*)0)
#define RUNNING_STATE ((void*)1)
#define DONE_STATE ((void*)2)
#define ERROR_STATE ((void*)-1)

/* Queue state */
#define QUEUE_RUNNING (0)
#define QUEUE_STOPPED (1)

#define IS_DMA_ALIGNED(x) (((u32)(x) & 0x03) == 0)
/*-------------------------------------------------------------------------*/


/*-------------------------------------------------------------------------*/
/* Driver data structs */

/* Context */
struct driver_data {
    /* Driver model hookup */
    struct platform_device *pdev;

    /* SPI framework hookup */
    struct spi_master *master;

    /* IMX hookup */
    struct spi_imx_master *master_info;

    /* Memory resources and SPI regs virtual address */
    struct resource *ioarea;
    void __iomem *regs;

    /* SPI RX_DATA physical address */
    dma_addr_t rd_data_phys;

    /* Driver message queue */
    struct workqueue_struct *workqueue;
    struct work_struct work;
    spinlock_t lock;
    struct list_head queue;
    int busy;
    int run;

    /* Message Transfer pump */
    struct tasklet_struct pump_transfers;

    /* Current message, transfer and state */
    struct spi_message *cur_msg;
    struct spi_transfer *cur_transfer;
    struct chip_data *cur_chip;

    /* Rd / Wr buffers pointers */
    size_t len;
    void *tx;
    void *tx_end;
    void *rx;
    void *rx_end;

    u8 rd_only;
    u8 n_bytes;
    int cs_change;

    /* Function pointers */
    irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
    void (*cs_control)(u32 command);

    /* DMA setup */
    int rx_channel;
    int tx_channel;
    dma_addr_t rx_dma;
    dma_addr_t tx_dma;
    int rx_dma_needs_unmap;
    int tx_dma_needs_unmap;
    size_t tx_map_len;
    u32 dummy_dma_buf ____cacheline_aligned;
};

/* Runtime state */
struct chip_data {
    u32 control;
    u32 period;
    u32 test;

    u8 enable_dma:1;
    u8 bits_per_word;
    u8 n_bytes;
    u32 max_speed_hz;

    void (*cs_control)(u32 command);
};
/*-------------------------------------------------------------------------*/


static void pump_messages(struct work_struct *work);
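/* Drain any data left in the RXFIFO, then spin until the current exchange
   (SPI_CONTROL_XCH) completes, bounded by a loop budget derived from
   loops_per_jiffy. Returns the remaining budget, so 0 means timeout;
   callers treat flush() == 0 as an error. */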
static int flush(struct driver_data *drv_data)
{
    unsigned long limit = loops_per_jiffy << 1;
    void __iomem *regs = drv_data->regs;
    volatile u32 d;

    dev_dbg(&drv_data->pdev->dev, "flush\n");
    do {
        while (readl(regs + SPI_INT_STATUS) & SPI_STATUS_RR)
            d = readl(regs + SPI_RXDATA);
    } while ((readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH) && limit--);

    return limit;
}
static void restore_state(struct driver_data *drv_data)
{
    void __iomem *regs = drv_data->regs;
    struct chip_data *chip = drv_data->cur_chip;

    /* Load chip registers */
    dev_dbg(&drv_data->pdev->dev,
        "restore_state\n"
        " test = 0x%08X\n"
        " control = 0x%08X\n",
        chip->test,
        chip->control);
    writel(chip->test, regs + SPI_TEST);
    writel(chip->period, regs + SPI_PERIOD);
    writel(0, regs + SPI_INT_STATUS);
    writel(chip->control, regs + SPI_CONTROL);
}

static void null_cs_control(u32 command)
{
}

static inline u32 data_to_write(struct driver_data *drv_data)
{
    return ((u32)(drv_data->tx_end - drv_data->tx)) / drv_data->n_bytes;
}

static inline u32 data_to_read(struct driver_data *drv_data)
{
    return ((u32)(drv_data->rx_end - drv_data->rx)) / drv_data->n_bytes;
}

static int write(struct driver_data *drv_data)
{
    void __iomem *regs = drv_data->regs;
    void *tx = drv_data->tx;
    void *tx_end = drv_data->tx_end;
    u8 n_bytes = drv_data->n_bytes;
    u32 remaining_writes;
    u32 fifo_avail_space;
    u32 n;
    u16 d;

    /* Compute how many fifo writes to do */
    remaining_writes = (u32)(tx_end - tx) / n_bytes;
    fifo_avail_space = SPI_FIFO_DEPTH -
                (readl(regs + SPI_TEST) & SPI_TEST_TXCNT);
    if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN))
        /* Fix misunderstood receive overflow */
        fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN;
    n = min(remaining_writes, fifo_avail_space);

    dev_dbg(&drv_data->pdev->dev,
        "write type %s\n"
        " remaining writes = %d\n"
        " fifo avail space = %d\n"
        " fifo writes = %d\n",
        (n_bytes == 1) ? "u8" : "u16",
        remaining_writes,
        fifo_avail_space,
        n);

    if (n > 0) {
        /* Fill SPI TXFIFO */
        if (drv_data->rd_only) {
            tx += n * n_bytes;
            while (n--)
                writel(SPI_DUMMY_u16, regs + SPI_TXDATA);
        } else {
            if (n_bytes == 1) {
                while (n--) {
                    d = *(u8*)tx;
                    writel(d, regs + SPI_TXDATA);
                    tx += 1;
                }
            } else {
                while (n--) {
                    d = *(u16*)tx;
                    writel(d, regs + SPI_TXDATA);
                    tx += 2;
                }
            }
        }

        /* Trigger transfer */
        writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
            regs + SPI_CONTROL);

        /* Update tx pointer */
        drv_data->tx = tx;
    }

    return (tx >= tx_end);
}

static int read(struct driver_data *drv_data)
{
    void __iomem *regs = drv_data->regs;
    void *rx = drv_data->rx;
    void *rx_end = drv_data->rx_end;
    u8 n_bytes = drv_data->n_bytes;
    u32 remaining_reads;
    u32 fifo_rxcnt;
    u32 n;
    u16 d;

    /* Compute how many fifo reads to do */
    remaining_reads = (u32)(rx_end - rx) / n_bytes;
    fifo_rxcnt = (readl(regs + SPI_TEST) & SPI_TEST_RXCNT) >>
            SPI_TEST_RXCNT_LSB;
    n = min(remaining_reads, fifo_rxcnt);

    dev_dbg(&drv_data->pdev->dev,
        "read type %s\n"
        " remaining reads = %d\n"
        " fifo rx count = %d\n"
        " fifo reads = %d\n",
        (n_bytes == 1) ? "u8" : "u16",
        remaining_reads,
        fifo_rxcnt,
        n);

    if (n > 0) {
        /* Read SPI RXFIFO */
        if (n_bytes == 1) {
            while (n--) {
                d = readl(regs + SPI_RXDATA);
                *((u8*)rx) = d;
                rx += 1;
            }
        } else {
            while (n--) {
                d = readl(regs + SPI_RXDATA);
                *((u16*)rx) = d;
                rx += 2;
            }
        }

        /* Update rx pointer */
        drv_data->rx = rx;
    }

    return (rx >= rx_end);
}

static void *next_transfer(struct driver_data *drv_data)
{
    struct spi_message *msg = drv_data->cur_msg;
    struct spi_transfer *trans = drv_data->cur_transfer;

    /* Move to next transfer */
    if (trans->transfer_list.next != &msg->transfers) {
        drv_data->cur_transfer =
            list_entry(trans->transfer_list.next,
                    struct spi_transfer,
                    transfer_list);
        return RUNNING_STATE;
    }

    return DONE_STATE;
}
static int map_dma_buffers(struct driver_data *drv_data)
{
    struct spi_message *msg;
    struct device *dev;
    void *buf;

    drv_data->rx_dma_needs_unmap = 0;
    drv_data->tx_dma_needs_unmap = 0;

    if (!drv_data->master_info->enable_dma ||
        !drv_data->cur_chip->enable_dma)
            return -1;

    msg = drv_data->cur_msg;
    dev = &msg->spi->dev;
    if (msg->is_dma_mapped) {
        if (drv_data->tx_dma)
            /* The caller provided at least dma and cpu virtual
               address for write; pump_transfers() will consider the
               transfer as write only if cpu rx virtual address is
               NULL */
            return 0;

        if (drv_data->rx_dma) {
            /* The caller provided dma and cpu virtual address to
               perform a read-only transfer -->
               use drv_data->dummy_dma_buf for dummy writes to
               achieve reads */
            buf = &drv_data->dummy_dma_buf;
            drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
            drv_data->tx_dma = dma_map_single(dev,
                            buf,
                            drv_data->tx_map_len,
                            DMA_TO_DEVICE);
            if (dma_mapping_error(drv_data->tx_dma))
                return -1;
            drv_data->tx_dma_needs_unmap = 1;

            /* Flags transfer as rd_only for pump_transfers() DMA
               regs programming (should be redundant) */
            drv_data->tx = NULL;

            return 0;
        }
    }

    if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
        return -1;

    /* NULL rx means write-only transfer and no map needed
       since rx DMA will not be used */
    if (drv_data->rx) {
        buf = drv_data->rx;
        drv_data->rx_dma = dma_map_single(
                    dev,
                    buf,
                    drv_data->len,
                    DMA_FROM_DEVICE);
        if (dma_mapping_error(drv_data->rx_dma))
            return -1;
        drv_data->rx_dma_needs_unmap = 1;
    }

    if (drv_data->tx == NULL) {
        /* Read only message --> use drv_data->dummy_dma_buf for dummy
           writes to achieve reads */
        buf = &drv_data->dummy_dma_buf;
        drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
    } else {
        buf = drv_data->tx;
        drv_data->tx_map_len = drv_data->len;
    }
    drv_data->tx_dma = dma_map_single(dev,
                    buf,
                    drv_data->tx_map_len,
                    DMA_TO_DEVICE);
    if (dma_mapping_error(drv_data->tx_dma)) {
        if (drv_data->rx_dma) {
            dma_unmap_single(dev,
                    drv_data->rx_dma,
                    drv_data->len,
                    DMA_FROM_DEVICE);
            drv_data->rx_dma_needs_unmap = 0;
        }
        return -1;
    }
    drv_data->tx_dma_needs_unmap = 1;

    return 0;
}
static void unmap_dma_buffers(struct driver_data *drv_data)
{
    struct spi_message *msg = drv_data->cur_msg;
    struct device *dev = &msg->spi->dev;

    if (drv_data->rx_dma_needs_unmap) {
        dma_unmap_single(dev,
                drv_data->rx_dma,
                drv_data->len,
                DMA_FROM_DEVICE);
        drv_data->rx_dma_needs_unmap = 0;
    }
    if (drv_data->tx_dma_needs_unmap) {
        dma_unmap_single(dev,
                drv_data->tx_dma,
                drv_data->tx_map_len,
                DMA_TO_DEVICE);
        drv_data->tx_dma_needs_unmap = 0;
    }
}

/* Caller already set message->status (dma is already blocked) */
static void giveback(struct spi_message *message, struct driver_data *drv_data)
{
    void __iomem *regs = drv_data->regs;

    /* Bring SPI to sleep; restore_state() and pump_transfers()
       will do new setup */
    writel(0, regs + SPI_INT_STATUS);
    writel(0, regs + SPI_DMA);

    drv_data->cs_control(SPI_CS_DEASSERT);

    message->state = NULL;
    if (message->complete)
        message->complete(message->context);

    drv_data->cur_msg = NULL;
    drv_data->cur_transfer = NULL;
    drv_data->cur_chip = NULL;
    queue_work(drv_data->workqueue, &drv_data->work);
}

static void dma_err_handler(int channel, void *data, int errcode)
{
    struct driver_data *drv_data = data;
    struct spi_message *msg = drv_data->cur_msg;

    dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n");

    /* Disable both rx and tx dma channels */
    imx_dma_disable(drv_data->rx_channel);
    imx_dma_disable(drv_data->tx_channel);

    if (flush(drv_data) == 0)
        dev_err(&drv_data->pdev->dev,
            "dma_err_handler - flush failed\n");

    unmap_dma_buffers(drv_data);

    msg->state = ERROR_STATE;
    tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_tx_handler(int channel, void *data)
{
    struct driver_data *drv_data = data;

    dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n");

    imx_dma_disable(channel);

    /* Now wait for TX FIFO empty */
    writel(readl(drv_data->regs + SPI_INT_STATUS) | SPI_INTEN_TE,
        drv_data->regs + SPI_INT_STATUS);
}
static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
    u32 status;
    struct spi_message *msg = drv_data->cur_msg;
    void __iomem *regs = drv_data->regs;
    unsigned long limit;

    status = readl(regs + SPI_INT_STATUS);

    if ((status & SPI_INTEN_RO) && (status & SPI_STATUS_RO)) {
        writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);

        imx_dma_disable(drv_data->rx_channel);
        unmap_dma_buffers(drv_data);

        if (flush(drv_data) == 0)
            dev_err(&drv_data->pdev->dev,
                "dma_transfer - flush failed\n");

        dev_warn(&drv_data->pdev->dev,
            "dma_transfer - fifo overrun\n");

        msg->state = ERROR_STATE;
        tasklet_schedule(&drv_data->pump_transfers);

        return IRQ_HANDLED;
    }

    if (status & SPI_STATUS_TE) {
        writel(status & ~SPI_INTEN_TE, regs + SPI_INT_STATUS);

        if (drv_data->rx) {
            /* Wait for end of transfer before reading trailing data */
            limit = loops_per_jiffy << 1;
            while ((readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH) &&
                limit--);
            if (limit == 0)
                dev_err(&drv_data->pdev->dev,
                    "dma_transfer - end of tx failed\n");
            else
                dev_dbg(&drv_data->pdev->dev,
                    "dma_transfer - end of tx\n");

            imx_dma_disable(drv_data->rx_channel);
            unmap_dma_buffers(drv_data);

            /* Calculate number of trailing data and read them */
            dev_dbg(&drv_data->pdev->dev,
                "dma_transfer - test = 0x%08X\n",
                readl(regs + SPI_TEST));
            drv_data->rx = drv_data->rx_end -
                    ((readl(regs + SPI_TEST) &
                    SPI_TEST_RXCNT) >>
                    SPI_TEST_RXCNT_LSB)*drv_data->n_bytes;
            read(drv_data);
        } else {
            /* Write only transfer */
            unmap_dma_buffers(drv_data);

            if (flush(drv_data) == 0)
                dev_err(&drv_data->pdev->dev,
                    "dma_transfer - flush failed\n");
        }

        /* End of transfer, update total bytes transferred */
        msg->actual_length += drv_data->len;

        /* Release chip select if requested, transfer delays are
           handled in pump_transfers() */
        if (drv_data->cs_change)
            drv_data->cs_control(SPI_CS_DEASSERT);

        /* Move to next transfer */
        msg->state = next_transfer(drv_data);

        /* Schedule transfer tasklet */
        tasklet_schedule(&drv_data->pump_transfers);

        return IRQ_HANDLED;
    }

    /* Oops, problem detected */
    return IRQ_NONE;
}
static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data)
{
    struct spi_message *msg = drv_data->cur_msg;
    void __iomem *regs = drv_data->regs;
    u32 status;
    irqreturn_t handled = IRQ_NONE;

    status = readl(regs + SPI_INT_STATUS);

    while (status & SPI_STATUS_TH) {
        dev_dbg(&drv_data->pdev->dev,
            "interrupt_wronly_transfer - status = 0x%08X\n", status);

        /* Pump data */
        if (write(drv_data)) {
            writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
                regs + SPI_INT_STATUS);

            dev_dbg(&drv_data->pdev->dev,
                "interrupt_wronly_transfer - end of tx\n");

            if (flush(drv_data) == 0)
                dev_err(&drv_data->pdev->dev,
                    "interrupt_wronly_transfer - "
                    "flush failed\n");

            /* End of transfer, update total bytes transferred */
            msg->actual_length += drv_data->len;

            /* Release chip select if requested, transfer delays are
               handled in pump_transfers */
            if (drv_data->cs_change)
                drv_data->cs_control(SPI_CS_DEASSERT);

            /* Move to next transfer */
            msg->state = next_transfer(drv_data);

            /* Schedule transfer tasklet */
            tasklet_schedule(&drv_data->pump_transfers);

            return IRQ_HANDLED;
        }

        status = readl(regs + SPI_INT_STATUS);

        /* We did something */
        handled = IRQ_HANDLED;
    }

    return handled;
}
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
    struct spi_message *msg = drv_data->cur_msg;
    void __iomem *regs = drv_data->regs;
    u32 status;
    irqreturn_t handled = IRQ_NONE;
    unsigned long limit;

    status = readl(regs + SPI_INT_STATUS);

    while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
        dev_dbg(&drv_data->pdev->dev,
            "interrupt_transfer - status = 0x%08X\n", status);

        if (status & SPI_STATUS_RO) {
            writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
                regs + SPI_INT_STATUS);

            dev_warn(&drv_data->pdev->dev,
                "interrupt_transfer - fifo overrun\n"
                " data not yet written = %d\n"
                " data not yet read = %d\n",
                data_to_write(drv_data),
                data_to_read(drv_data));

            if (flush(drv_data) == 0)
                dev_err(&drv_data->pdev->dev,
                    "interrupt_transfer - flush failed\n");

            msg->state = ERROR_STATE;
            tasklet_schedule(&drv_data->pump_transfers);

            return IRQ_HANDLED;
        }

        /* Pump data */
        read(drv_data);
        if (write(drv_data)) {
            writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
                regs + SPI_INT_STATUS);

            dev_dbg(&drv_data->pdev->dev,
                "interrupt_transfer - end of tx\n");

            /* Read trailing bytes */
            limit = loops_per_jiffy << 1;
            while ((read(drv_data) == 0) && limit--);
            if (limit == 0)
                dev_err(&drv_data->pdev->dev,
                    "interrupt_transfer - "
                    "trailing byte read failed\n");
            else
                dev_dbg(&drv_data->pdev->dev,
                    "interrupt_transfer - end of rx\n");

            /* End of transfer, update total bytes transferred */
            msg->actual_length += drv_data->len;

            /* Release chip select if requested, transfer delays are
               handled in pump_transfers */
            if (drv_data->cs_change)
                drv_data->cs_control(SPI_CS_DEASSERT);

            /* Move to next transfer */
            msg->state = next_transfer(drv_data);

            /* Schedule transfer tasklet */
            tasklet_schedule(&drv_data->pump_transfers);

            return IRQ_HANDLED;
        }

        status = readl(regs + SPI_INT_STATUS);

        /* We did something */
        handled = IRQ_HANDLED;
    }

    return handled;
}
static irqreturn_t spi_int(int irq, void *dev_id)
{
    struct driver_data *drv_data = (struct driver_data *)dev_id;

    if (!drv_data->cur_msg) {
        dev_err(&drv_data->pdev->dev,
            "spi_int - bad message state\n");
        /* Never fail */
        return IRQ_HANDLED;
    }

    return drv_data->transfer_handler(drv_data);
}

static inline u32 spi_speed_hz(u32 data_rate)
{
    return imx_get_perclk2() / (4 << ((data_rate) >> 13));
}
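/* The three SPI_CONTROL_DATARATE bits (13..15) encode a PERCLK2 divider of
   4 << n, so n = 0 gives PERCLK2/4 and n = 7 gives PERCLK2/512, matching
   SPI_PERCLK2_DIV_MIN/MAX above. spi_data_rate() below walks the dividers
   from fastest to slowest and picks the first rate that does not exceed
   the requested speed. */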
static u32 spi_data_rate(u32 speed_hz)
{
    u32 div;
    u32 quantized_hz = imx_get_perclk2() >> 2;

    for (div = SPI_PERCLK2_DIV_MIN;
         div <= SPI_PERCLK2_DIV_MAX;
         div++, quantized_hz >>= 1) {
            if (quantized_hz <= speed_hz)
                /* Highest available speed not exceeding the
                   requested speed */
                return div << 13;
    }
    return SPI_CONTROL_DATARATE_BAD;
}
static void pump_transfers(unsigned long data)
{
    struct driver_data *drv_data = (struct driver_data *)data;
    struct spi_message *message;
    struct spi_transfer *transfer, *previous;
    struct chip_data *chip;
    void __iomem *regs;
    u32 tmp, control;

    dev_dbg(&drv_data->pdev->dev, "pump_transfer\n");

    message = drv_data->cur_msg;

    /* Handle for abort */
    if (message->state == ERROR_STATE) {
        message->status = -EIO;
        giveback(message, drv_data);
        return;
    }

    /* Handle end of message */
    if (message->state == DONE_STATE) {
        message->status = 0;
        giveback(message, drv_data);
        return;
    }

    chip = drv_data->cur_chip;

    /* Delay if requested at end of transfer */
    transfer = drv_data->cur_transfer;
    if (message->state == RUNNING_STATE) {
        previous = list_entry(transfer->transfer_list.prev,
                    struct spi_transfer,
                    transfer_list);
        if (previous->delay_usecs)
            udelay(previous->delay_usecs);
    } else {
        /* START_STATE */
        message->state = RUNNING_STATE;
        drv_data->cs_control = chip->cs_control;
    }

    transfer = drv_data->cur_transfer;
    drv_data->tx = (void *)transfer->tx_buf;
    drv_data->tx_end = drv_data->tx + transfer->len;
    drv_data->rx = transfer->rx_buf;
    drv_data->rx_end = drv_data->rx + transfer->len;
    drv_data->rx_dma = transfer->rx_dma;
    drv_data->tx_dma = transfer->tx_dma;
    drv_data->len = transfer->len;
    drv_data->cs_change = transfer->cs_change;
    drv_data->rd_only = (drv_data->tx == NULL);

    regs = drv_data->regs;
    control = readl(regs + SPI_CONTROL);

    /* Bits per word setup */
    tmp = transfer->bits_per_word;
    if (tmp == 0) {
        /* Use device setup */
        tmp = chip->bits_per_word;
        drv_data->n_bytes = chip->n_bytes;
    } else
        /* Use per-transfer setup */
        drv_data->n_bytes = (tmp <= 8) ? 1 : 2;
    u32_EDIT(control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);

    /* Speed setup (surely valid because already checked) */
    tmp = transfer->speed_hz;
    if (tmp == 0)
        tmp = chip->max_speed_hz;
    tmp = spi_data_rate(tmp);
    u32_EDIT(control, SPI_CONTROL_DATARATE, tmp);

    writel(control, regs + SPI_CONTROL);

    /* Assert device chip-select */
    drv_data->cs_control(SPI_CS_ASSERT);

    /* DMA cannot read/write SPI FIFOs other than 16 bits at a time; hence
       if bits_per_word is 8 or less, PIO transfers are performed. Moreover,
       DMA is only convenient for transfers longer than the FIFO's size
       in bytes. */
    if ((drv_data->n_bytes == 2) &&
        (drv_data->len > SPI_FIFO_DEPTH*SPI_FIFO_BYTE_WIDTH) &&
        (map_dma_buffers(drv_data) == 0)) {
        dev_dbg(&drv_data->pdev->dev,
            "pump dma transfer\n"
            " tx = %p\n"
            " tx_dma = %08X\n"
            " rx = %p\n"
            " rx_dma = %08X\n"
            " len = %d\n",
            drv_data->tx,
            (unsigned int)drv_data->tx_dma,
            drv_data->rx,
            (unsigned int)drv_data->rx_dma,
            drv_data->len);

        /* Ensure we have the correct interrupt handler */
        drv_data->transfer_handler = dma_transfer;

        /* Trigger transfer */
        writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
            regs + SPI_CONTROL);

        /* Setup tx DMA */
        if (drv_data->tx)
            /* Linear source address */
            CCR(drv_data->tx_channel) =
                CCR_DMOD_FIFO |
                CCR_SMOD_LINEAR |
                CCR_SSIZ_32 | CCR_DSIZ_16 |
                CCR_REN;
        else
            /* Read only transfer -> fixed source address for
               dummy write to achieve read */
            CCR(drv_data->tx_channel) =
                CCR_DMOD_FIFO |
                CCR_SMOD_FIFO |
                CCR_SSIZ_32 | CCR_DSIZ_16 |
                CCR_REN;

        imx_dma_setup_single(
            drv_data->tx_channel,
            drv_data->tx_dma,
            drv_data->len,
            drv_data->rd_data_phys + 4,
            DMA_MODE_WRITE);

        if (drv_data->rx) {
            /* Setup rx DMA for linear destination address */
            CCR(drv_data->rx_channel) =
                CCR_DMOD_LINEAR |
                CCR_SMOD_FIFO |
                CCR_DSIZ_32 | CCR_SSIZ_16 |
                CCR_REN;
            imx_dma_setup_single(
                drv_data->rx_channel,
                drv_data->rx_dma,
                drv_data->len,
                drv_data->rd_data_phys,
                DMA_MODE_READ);
            imx_dma_enable(drv_data->rx_channel);

            /* Enable SPI interrupt */
            writel(SPI_INTEN_RO, regs + SPI_INT_STATUS);

            /* Set SPI to request DMA service on both
               Rx and Tx half fifo watermark */
            writel(SPI_DMA_RHDEN | SPI_DMA_THDEN, regs + SPI_DMA);
        } else
            /* Write only access -> set SPI to request DMA
               service on Tx half fifo watermark */
            writel(SPI_DMA_THDEN, regs + SPI_DMA);

        imx_dma_enable(drv_data->tx_channel);
    } else {
        dev_dbg(&drv_data->pdev->dev,
            "pump pio transfer\n"
            " tx = %p\n"
            " rx = %p\n"
            " len = %d\n",
            drv_data->tx,
            drv_data->rx,
            drv_data->len);

        /* Ensure we have the correct interrupt handler */
        if (drv_data->rx)
            drv_data->transfer_handler = interrupt_transfer;
        else
            drv_data->transfer_handler = interrupt_wronly_transfer;

        /* Enable SPI interrupt */
        if (drv_data->rx)
            writel(SPI_INTEN_TH | SPI_INTEN_RO,
                regs + SPI_INT_STATUS);
        else
            writel(SPI_INTEN_TH, regs + SPI_INT_STATUS);
    }
}
static void pump_messages(struct work_struct *work)
{
    struct driver_data *drv_data =
                container_of(work, struct driver_data, work);
    unsigned long flags;

    /* Lock queue and check for queue work */
    spin_lock_irqsave(&drv_data->lock, flags);
    if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
        drv_data->busy = 0;
        spin_unlock_irqrestore(&drv_data->lock, flags);
        return;
    }

    /* Make sure we are not already running a message */
    if (drv_data->cur_msg) {
        spin_unlock_irqrestore(&drv_data->lock, flags);
        return;
    }

    /* Extract head of queue */
    drv_data->cur_msg = list_entry(drv_data->queue.next,
                    struct spi_message, queue);
    list_del_init(&drv_data->cur_msg->queue);
    drv_data->busy = 1;
    spin_unlock_irqrestore(&drv_data->lock, flags);

    /* Initial message state */
    drv_data->cur_msg->state = START_STATE;
    drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
                        struct spi_transfer,
                        transfer_list);

    /* Setup the SPI using the per chip configuration */
    drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
    restore_state(drv_data);

    /* Mark as busy and launch transfers */
    tasklet_schedule(&drv_data->pump_transfers);
}
static int transfer(struct spi_device *spi, struct spi_message *msg)
{
    struct driver_data *drv_data = spi_master_get_devdata(spi->master);
    u32 min_speed_hz, max_speed_hz, tmp;
    struct spi_transfer *trans;
    unsigned long flags;

    msg->actual_length = 0;

    /* Per transfer setup check */
    min_speed_hz = spi_speed_hz(SPI_CONTROL_DATARATE_MIN);
    max_speed_hz = spi->max_speed_hz;
    list_for_each_entry(trans, &msg->transfers, transfer_list) {
        tmp = trans->bits_per_word;
        if (tmp > 16) {
            dev_err(&drv_data->pdev->dev,
                "message rejected : "
                "invalid transfer bits_per_word (%d bits)\n",
                tmp);
            goto msg_rejected;
        }
        tmp = trans->speed_hz;
        if (tmp) {
            if (tmp < min_speed_hz) {
                dev_err(&drv_data->pdev->dev,
                    "message rejected : "
                    "device min speed (%d Hz) exceeds "
                    "required transfer speed (%d Hz)\n",
                    min_speed_hz,
                    tmp);
                goto msg_rejected;
            } else if (tmp > max_speed_hz) {
                dev_err(&drv_data->pdev->dev,
                    "message rejected : "
                    "transfer speed (%d Hz) exceeds "
                    "device max speed (%d Hz)\n",
                    tmp,
                    max_speed_hz);
                goto msg_rejected;
            }
        }
    }

    /* Message accepted */
    msg->status = -EINPROGRESS;
    msg->state = START_STATE;

    spin_lock_irqsave(&drv_data->lock, flags);
    if (drv_data->run == QUEUE_STOPPED) {
        spin_unlock_irqrestore(&drv_data->lock, flags);
        return -ESHUTDOWN;
    }

    list_add_tail(&msg->queue, &drv_data->queue);
    if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
        queue_work(drv_data->workqueue, &drv_data->work);
    spin_unlock_irqrestore(&drv_data->lock, flags);

    return 0;

msg_rejected:
    /* Message rejected and not queued */
    msg->status = -EINVAL;
    msg->state = ERROR_STATE;
    if (msg->complete)
        msg->complete(msg->context);
    return -EINVAL;
}
/* On first setup, bad values must free the chip_data memory, since they will
   cause spi_new_device() to fail. Bad values in later setup() calls from the
   protocol driver are simply not applied, and the caller is notified. */
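/* Illustrative board-level usage (a sketch, not part of this driver):
 * a board file would pass a struct spi_imx_chip through
 * spi_board_info.controller_data, using the fields consumed below.
 * "my_cs_control" and "my_chip_info" are hypothetical names:
 *
 *	static void my_cs_control(u32 command)
 *	{
 *		(drive the chip-select line on SPI_CS_ASSERT /
 *		 SPI_CS_DEASSERT, e.g. via a GPIO)
 *	}
 *
 *	static struct spi_imx_chip my_chip_info = {
 *		.enable_loopback = 0,
 *		.enable_dma      = 1,
 *		.ins_ss_pulse    = 1,
 *		.bclk_wait       = SPI_DEFAULT_PERIOD_WAIT,
 *		.cs_control      = my_cs_control,
 *	};
 */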
static int setup(struct spi_device *spi)
{
    struct spi_imx_chip *chip_info;
    struct chip_data *chip;
    int first_setup = 0;
    u32 tmp;
    int status = 0;

    /* Get controller data */
    chip_info = spi->controller_data;

    /* Get controller_state */
    chip = spi_get_ctldata(spi);
    if (chip == NULL) {
        first_setup = 1;

        chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
        if (!chip) {
            dev_err(&spi->dev,
                "setup - cannot allocate controller state");
            return -ENOMEM;
        }
        chip->control = SPI_DEFAULT_CONTROL;

        if (chip_info == NULL) {
            /* spi_board_info.controller_data is not supplied */
            chip_info = kzalloc(sizeof(struct spi_imx_chip),
                        GFP_KERNEL);
            if (!chip_info) {
                dev_err(&spi->dev,
                    "setup - "
                    "cannot allocate controller data");
                status = -ENOMEM;
                goto err_first_setup;
            }

            /* Set controller data default value */
            chip_info->enable_loopback =
                        SPI_DEFAULT_ENABLE_LOOPBACK;
            chip_info->enable_dma = SPI_DEFAULT_ENABLE_DMA;
            chip_info->ins_ss_pulse = 1;
            chip_info->bclk_wait = SPI_DEFAULT_PERIOD_WAIT;
            chip_info->cs_control = null_cs_control;
        }
    }

    /* Now set controller state based on controller data */

    if (first_setup) {
        /* SPI loopback */
        if (chip_info->enable_loopback)
            chip->test = SPI_TEST_LBC;
        else
            chip->test = 0;

        /* SPI dma driven */
        chip->enable_dma = chip_info->enable_dma;

        /* SPI /SS pulse between spi burst */
        if (chip_info->ins_ss_pulse)
            u32_EDIT(chip->control,
                SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_1);
        else
            u32_EDIT(chip->control,
                SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_0);

        /* SPI bclk waits between each bits_per_word spi burst */
        if (chip_info->bclk_wait > SPI_PERIOD_MAX_WAIT) {
            dev_err(&spi->dev,
                "setup - "
                "bclk_wait exceeds max allowed (%d)\n",
                SPI_PERIOD_MAX_WAIT);
            status = -EINVAL;
            goto err_first_setup;
        }
        chip->period = SPI_PERIOD_CSRC_BCLK |
                (chip_info->bclk_wait & SPI_PERIOD_WAIT);
    }
    /* SPI mode */
    tmp = spi->mode;
    if (tmp & SPI_LSB_FIRST) {
        status = -EINVAL;
        if (first_setup) {
            dev_err(&spi->dev,
                "setup - "
                "HW doesn't support LSB first transfer\n");
            goto err_first_setup;
        } else {
            dev_err(&spi->dev,
                "setup - "
                "HW doesn't support LSB first transfer, "
                "default to MSB first\n");
            spi->mode &= ~SPI_LSB_FIRST;
        }
    }
    if (tmp & SPI_CS_HIGH) {
        u32_EDIT(chip->control,
            SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
    }
    switch (tmp & SPI_MODE_3) {
    case SPI_MODE_0:
        tmp = 0;
        break;
    case SPI_MODE_1:
        tmp = SPI_CONTROL_PHA_1;
        break;
    case SPI_MODE_2:
        tmp = SPI_CONTROL_POL_ACT_LOW;
        break;
    default:
        /* SPI_MODE_3 */
        tmp = SPI_CONTROL_PHA_1 | SPI_CONTROL_POL_ACT_LOW;
        break;
    }
    u32_EDIT(chip->control, SPI_CONTROL_POL | SPI_CONTROL_PHA, tmp);

    /* SPI word width */
    tmp = spi->bits_per_word;
    if (tmp == 0) {
        tmp = 8;
        spi->bits_per_word = 8;
    } else if (tmp > 16) {
        status = -EINVAL;
        dev_err(&spi->dev,
            "setup - "
            "invalid bits_per_word (%d)\n",
            tmp);
        if (first_setup)
            goto err_first_setup;
        else {
            /* Undo setup using chip as backup copy */
            tmp = chip->bits_per_word;
            spi->bits_per_word = tmp;
        }
    }
    chip->bits_per_word = tmp;
    u32_EDIT(chip->control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
    chip->n_bytes = (tmp <= 8) ? 1 : 2;

    /* SPI datarate */
    tmp = spi_data_rate(spi->max_speed_hz);
    if (tmp == SPI_CONTROL_DATARATE_BAD) {
        status = -EINVAL;
        dev_err(&spi->dev,
            "setup - "
            "HW min speed (%d Hz) exceeds required "
            "max speed (%d Hz)\n",
            spi_speed_hz(SPI_CONTROL_DATARATE_MIN),
            spi->max_speed_hz);
        if (first_setup)
            goto err_first_setup;
        else
            /* Undo setup using chip as backup copy */
            spi->max_speed_hz = chip->max_speed_hz;
    } else {
        u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp);
        /* Actual rounded max_speed_hz */
        tmp = spi_speed_hz(tmp);
        spi->max_speed_hz = tmp;
        chip->max_speed_hz = tmp;
    }

    /* SPI chip-select management */
    if (chip_info->cs_control)
        chip->cs_control = chip_info->cs_control;
    else
        chip->cs_control = null_cs_control;

    /* Save controller_state */
    spi_set_ctldata(spi, chip);

    /* Summary */
    dev_dbg(&spi->dev,
        "setup succeeded\n"
        " loopback enable = %s\n"
        " dma enable = %s\n"
        " insert /ss pulse = %s\n"
        " period wait = %d\n"
        " mode = %d\n"
        " bits per word = %d\n"
        " min speed = %d Hz\n"
        " rounded max speed = %d Hz\n",
        chip->test & SPI_TEST_LBC ? "Yes" : "No",
        chip->enable_dma ? "Yes" : "No",
        chip->control & SPI_CONTROL_SSCTL ? "Yes" : "No",
        chip->period & SPI_PERIOD_WAIT,
        spi->mode,
        spi->bits_per_word,
        spi_speed_hz(SPI_CONTROL_DATARATE_MIN),
        spi->max_speed_hz);
    return status;

err_first_setup:
    kfree(chip);
    return status;
}
static void cleanup(struct spi_device *spi)
{
    kfree(spi_get_ctldata(spi));
}

static int init_queue(struct driver_data *drv_data)
{
    INIT_LIST_HEAD(&drv_data->queue);
    spin_lock_init(&drv_data->lock);

    drv_data->run = QUEUE_STOPPED;
    drv_data->busy = 0;

    tasklet_init(&drv_data->pump_transfers,
            pump_transfers, (unsigned long)drv_data);

    INIT_WORK(&drv_data->work, pump_messages);
    drv_data->workqueue = create_singlethread_workqueue(
                drv_data->master->cdev.dev->bus_id);
    if (drv_data->workqueue == NULL)
        return -EBUSY;

    return 0;
}

static int start_queue(struct driver_data *drv_data)
{
    unsigned long flags;

    spin_lock_irqsave(&drv_data->lock, flags);

    if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
        spin_unlock_irqrestore(&drv_data->lock, flags);
        return -EBUSY;
    }

    drv_data->run = QUEUE_RUNNING;
    drv_data->cur_msg = NULL;
    drv_data->cur_transfer = NULL;
    drv_data->cur_chip = NULL;
    spin_unlock_irqrestore(&drv_data->lock, flags);

    queue_work(drv_data->workqueue, &drv_data->work);

    return 0;
}

static int stop_queue(struct driver_data *drv_data)
{
    unsigned long flags;
    unsigned limit = 500;
    int status = 0;

    spin_lock_irqsave(&drv_data->lock, flags);

    /* This is a bit lame, but is optimized for the common execution path.
     * A wait_queue on the drv_data->busy could be used, but then the common
     * execution path (pump_messages) would be required to call wake_up or
     * friends on every SPI message. Do this instead */
    drv_data->run = QUEUE_STOPPED;
    while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
        spin_unlock_irqrestore(&drv_data->lock, flags);
        msleep(10);
        spin_lock_irqsave(&drv_data->lock, flags);
    }

    if (!list_empty(&drv_data->queue) || drv_data->busy)
        status = -EBUSY;

    spin_unlock_irqrestore(&drv_data->lock, flags);

    return status;
}

static int destroy_queue(struct driver_data *drv_data)
{
    int status;

    status = stop_queue(drv_data);
    if (status != 0)
        return status;

    if (drv_data->workqueue)
        destroy_workqueue(drv_data->workqueue);

    return 0;
}
static int spi_imx_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct spi_imx_master *platform_info;
    struct spi_master *master;
    struct driver_data *drv_data = NULL;
    struct resource *res;
    int irq, status = 0;

    platform_info = dev->platform_data;
    if (platform_info == NULL) {
        dev_err(&pdev->dev, "probe - no platform data supplied\n");
        status = -ENODEV;
        goto err_no_pdata;
    }

    /* Allocate master with space for drv_data */
    master = spi_alloc_master(dev, sizeof(struct driver_data));
    if (!master) {
        dev_err(&pdev->dev, "probe - cannot alloc spi_master\n");
        status = -ENOMEM;
        goto err_no_mem;
    }
    drv_data = spi_master_get_devdata(master);
    drv_data->master = master;
    drv_data->master_info = platform_info;
    drv_data->pdev = pdev;

    master->bus_num = pdev->id;
    master->num_chipselect = platform_info->num_chipselect;
    master->cleanup = cleanup;
    master->setup = setup;
    master->transfer = transfer;

    drv_data->dummy_dma_buf = SPI_DUMMY_u32;

    /* Find and map resources */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
        dev_err(&pdev->dev, "probe - MEM resources not defined\n");
        status = -ENODEV;
        goto err_no_iores;
    }
    drv_data->ioarea = request_mem_region(res->start,
                        res->end - res->start + 1,
                        pdev->name);
    if (drv_data->ioarea == NULL) {
        dev_err(&pdev->dev, "probe - cannot reserve region\n");
        status = -ENXIO;
        goto err_no_iores;
    }
    drv_data->regs = ioremap(res->start, res->end - res->start + 1);
    if (drv_data->regs == NULL) {
        dev_err(&pdev->dev, "probe - cannot map IO\n");
        status = -ENXIO;
        goto err_no_iomap;
    }
    drv_data->rd_data_phys = (dma_addr_t)res->start;

    /* Attach to IRQ */
    irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
        dev_err(&pdev->dev, "probe - IRQ resource not defined\n");
        status = -ENODEV;
        goto err_no_irqres;
    }
    status = request_irq(irq, spi_int, IRQF_DISABLED, dev->bus_id, drv_data);
    if (status < 0) {
        dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status);
        goto err_no_irqres;
    }

    /* Setup DMA if requested */
    drv_data->tx_channel = -1;
    drv_data->rx_channel = -1;
    if (platform_info->enable_dma) {
        /* Get rx DMA channel */
        status = imx_dma_request_by_prio(&drv_data->rx_channel,
                        "spi_imx_rx", DMA_PRIO_HIGH);
        if (status < 0) {
            dev_err(dev,
                "probe - problem (%d) requesting rx channel\n",
                status);
            goto err_no_rxdma;
        } else
            imx_dma_setup_handlers(drv_data->rx_channel, NULL,
                        dma_err_handler, drv_data);

        /* Get tx DMA channel */
        status = imx_dma_request_by_prio(&drv_data->tx_channel,
                        "spi_imx_tx", DMA_PRIO_MEDIUM);
        if (status < 0) {
            dev_err(dev,
                "probe - problem (%d) requesting tx channel\n",
                status);
            imx_dma_free(drv_data->rx_channel);
            goto err_no_txdma;
        } else
            imx_dma_setup_handlers(drv_data->tx_channel,
                        dma_tx_handler, dma_err_handler,
                        drv_data);

        /* Set request source and burst length for allocated channels */
        switch (drv_data->pdev->id) {
        case 1:
            /* Using SPI1 */
            RSSR(drv_data->rx_channel) = DMA_REQ_SPI1_R;
            RSSR(drv_data->tx_channel) = DMA_REQ_SPI1_T;
            break;
        case 2:
            /* Using SPI2 */
            RSSR(drv_data->rx_channel) = DMA_REQ_SPI2_R;
            RSSR(drv_data->tx_channel) = DMA_REQ_SPI2_T;
            break;
        default:
            dev_err(dev, "probe - bad SPI Id\n");
            imx_dma_free(drv_data->rx_channel);
            imx_dma_free(drv_data->tx_channel);
            status = -ENODEV;
            goto err_no_devid;
        }
        BLR(drv_data->rx_channel) = SPI_DMA_BLR;
        BLR(drv_data->tx_channel) = SPI_DMA_BLR;
    }

    /* Load default SPI configuration */
    writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
    writel(0, drv_data->regs + SPI_RESET);
    writel(SPI_DEFAULT_CONTROL, drv_data->regs + SPI_CONTROL);

    /* Initialize and start queue */
    status = init_queue(drv_data);
    if (status != 0) {
        dev_err(&pdev->dev, "probe - problem initializing queue\n");
        goto err_init_queue;
    }
    status = start_queue(drv_data);
    if (status != 0) {
        dev_err(&pdev->dev, "probe - problem starting queue\n");
        goto err_start_queue;
    }

    /* Register with the SPI framework */
    platform_set_drvdata(pdev, drv_data);
    status = spi_register_master(master);
    if (status != 0) {
        dev_err(&pdev->dev, "probe - problem registering spi master\n");
        goto err_spi_register;
    }

    dev_dbg(dev, "probe succeeded\n");
    return 0;

err_init_queue:
err_start_queue:
err_spi_register:
    destroy_queue(drv_data);

err_no_rxdma:
err_no_txdma:
err_no_devid:
    free_irq(irq, drv_data);

err_no_irqres:
    iounmap(drv_data->regs);

err_no_iomap:
    release_resource(drv_data->ioarea);
    kfree(drv_data->ioarea);

err_no_iores:
    spi_master_put(master);

err_no_pdata:
err_no_mem:
    return status;
}
static int __devexit spi_imx_remove(struct platform_device *pdev)
{
    struct driver_data *drv_data = platform_get_drvdata(pdev);
    int irq;
    int status = 0;

    if (!drv_data)
        return 0;

    tasklet_kill(&drv_data->pump_transfers);

    /* Remove the queue */
    status = destroy_queue(drv_data);
    if (status != 0) {
        dev_err(&pdev->dev, "queue remove failed (%d)\n", status);
        return status;
    }

    /* Reset SPI */
    writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
    writel(0, drv_data->regs + SPI_RESET);

    /* Release DMA */
    if (drv_data->master_info->enable_dma) {
        RSSR(drv_data->rx_channel) = 0;
        RSSR(drv_data->tx_channel) = 0;
        imx_dma_free(drv_data->tx_channel);
        imx_dma_free(drv_data->rx_channel);
    }

    /* Release IRQ */
    irq = platform_get_irq(pdev, 0);
    if (irq >= 0)
        free_irq(irq, drv_data);

    /* Release map resources */
    iounmap(drv_data->regs);
    release_resource(drv_data->ioarea);
    kfree(drv_data->ioarea);

    /* Disconnect from the SPI framework */
    spi_unregister_master(drv_data->master);
    spi_master_put(drv_data->master);

    /* Prevent double remove */
    platform_set_drvdata(pdev, NULL);

    dev_dbg(&pdev->dev, "remove succeeded\n");

    return 0;
}

static void spi_imx_shutdown(struct platform_device *pdev)
{
    struct driver_data *drv_data = platform_get_drvdata(pdev);

    /* Reset SPI */
    writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
    writel(0, drv_data->regs + SPI_RESET);

    dev_dbg(&pdev->dev, "shutdown succeeded\n");
}
#ifdef CONFIG_PM
static int suspend_devices(struct device *dev, void *pm_message)
{
    pm_message_t *state = pm_message;

    if (dev->power.power_state.event != state->event) {
        dev_warn(dev, "pm state does not match request\n");
        return -1;
    }

    return 0;
}

static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state)
{
    struct driver_data *drv_data = platform_get_drvdata(pdev);
    int status = 0;

    status = stop_queue(drv_data);
    if (status != 0) {
        dev_warn(&pdev->dev, "suspend cannot stop queue\n");
        return status;
    }

    dev_dbg(&pdev->dev, "suspended\n");

    return 0;
}

static int spi_imx_resume(struct platform_device *pdev)
{
    struct driver_data *drv_data = platform_get_drvdata(pdev);
    int status = 0;

    /* Start the queue running */
    status = start_queue(drv_data);
    if (status != 0)
        dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
    else
        dev_dbg(&pdev->dev, "resumed\n");

    return status;
}
#else
#define spi_imx_suspend NULL
#define spi_imx_resume NULL
#endif /* CONFIG_PM */

static struct platform_driver driver = {
    .driver = {
        .name = "imx-spi",
        .bus = &platform_bus_type,
        .owner = THIS_MODULE,
    },
    .probe = spi_imx_probe,
    .remove = __devexit_p(spi_imx_remove),
    .shutdown = spi_imx_shutdown,
    .suspend = spi_imx_suspend,
    .resume = spi_imx_resume,
};

static int __init spi_imx_init(void)
{
    return platform_driver_register(&driver);
}
module_init(spi_imx_init);

static void __exit spi_imx_exit(void)
{
    platform_driver_unregister(&driver);
}
module_exit(spi_imx_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("iMX SPI Controller Driver");
MODULE_LICENSE("GPL");