/*
 * drivers/spi/spi_imx.c
 *
 * Copyright (C) 2006 SWAPP
 *	Andrea Paterniani <a.paterniani@swapp-eng.it>
 *
 * Initial version inspired by:
 *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>

#include <mach/hardware.h>
#include <mach/imx-dma.h>
#include <mach/spi_imx.h>

/*-------------------------------------------------------------------------*/
/* SPI Registers offsets from peripheral base address */
#define SPI_RXDATA		(0x00)
#define SPI_TXDATA		(0x04)
#define SPI_CONTROL		(0x08)
#define SPI_INT_STATUS		(0x0C)
#define SPI_TEST		(0x10)
#define SPI_PERIOD		(0x14)
#define SPI_DMA			(0x18)
#define SPI_RESET		(0x1C)

/* SPI Control Register Bit Fields & Masks */
#define SPI_CONTROL_BITCOUNT_MASK	(0xF)		/* Bit Count Mask */
#define SPI_CONTROL_BITCOUNT(n)		(((n) - 1) & SPI_CONTROL_BITCOUNT_MASK)
#define SPI_CONTROL_POL			(0x1 << 4)	/* Clock Polarity Mask */
#define SPI_CONTROL_POL_ACT_HIGH	(0x0 << 4)	/* Active high pol. (0=idle) */
#define SPI_CONTROL_POL_ACT_LOW		(0x1 << 4)	/* Active low pol. (1=idle) */
#define SPI_CONTROL_PHA			(0x1 << 5)	/* Clock Phase Mask */
#define SPI_CONTROL_PHA_0		(0x0 << 5)	/* Clock Phase 0 */
#define SPI_CONTROL_PHA_1		(0x1 << 5)	/* Clock Phase 1 */
#define SPI_CONTROL_SSCTL		(0x1 << 6)	/* /SS Waveform Select Mask */
#define SPI_CONTROL_SSCTL_0		(0x0 << 6)	/* Master: /SS stays low between SPI bursts
							   Slave: RXFIFO advanced by BIT_COUNT */
#define SPI_CONTROL_SSCTL_1		(0x1 << 6)	/* Master: /SS insert pulse between SPI bursts
							   Slave: RXFIFO advanced by /SS rising edge */
#define SPI_CONTROL_SSPOL		(0x1 << 7)	/* /SS Polarity Select Mask */
#define SPI_CONTROL_SSPOL_ACT_LOW	(0x0 << 7)	/* /SS Active low */
#define SPI_CONTROL_SSPOL_ACT_HIGH	(0x1 << 7)	/* /SS Active high */
#define SPI_CONTROL_XCH			(0x1 << 8)	/* Exchange */
#define SPI_CONTROL_SPIEN		(0x1 << 9)	/* SPI Module Enable */
#define SPI_CONTROL_MODE		(0x1 << 10)	/* SPI Mode Select Mask */
#define SPI_CONTROL_MODE_SLAVE		(0x0 << 10)	/* SPI Mode Slave */
#define SPI_CONTROL_MODE_MASTER		(0x1 << 10)	/* SPI Mode Master */
#define SPI_CONTROL_DRCTL		(0x3 << 11)	/* /SPI_RDY Control Mask */
#define SPI_CONTROL_DRCTL_0		(0x0 << 11)	/* Ignore /SPI_RDY */
#define SPI_CONTROL_DRCTL_1		(0x1 << 11)	/* /SPI_RDY falling edge triggers input */
#define SPI_CONTROL_DRCTL_2		(0x2 << 11)	/* /SPI_RDY active low level triggers input */
#define SPI_CONTROL_DATARATE		(0x7 << 13)	/* Data Rate Mask */
#define SPI_PERCLK2_DIV_MIN		(0)		/* PERCLK2:4 */
#define SPI_PERCLK2_DIV_MAX		(7)		/* PERCLK2:512 */
#define SPI_CONTROL_DATARATE_MIN	(SPI_PERCLK2_DIV_MAX << 13)
#define SPI_CONTROL_DATARATE_MAX	(SPI_PERCLK2_DIV_MIN << 13)
#define SPI_CONTROL_DATARATE_BAD	(SPI_CONTROL_DATARATE_MIN + 1)

/* SPI Interrupt/Status Register Bit Fields & Masks */
#define SPI_STATUS_TE	(0x1 << 0)	/* TXFIFO Empty Status */
#define SPI_STATUS_TH	(0x1 << 1)	/* TXFIFO Half Status */
#define SPI_STATUS_TF	(0x1 << 2)	/* TXFIFO Full Status */
#define SPI_STATUS_RR	(0x1 << 3)	/* RXFIFO Data Ready Status */
#define SPI_STATUS_RH	(0x1 << 4)	/* RXFIFO Half Status */
#define SPI_STATUS_RF	(0x1 << 5)	/* RXFIFO Full Status */
#define SPI_STATUS_RO	(0x1 << 6)	/* RXFIFO Overflow */
#define SPI_STATUS_BO	(0x1 << 7)	/* Bit Count Overflow */
#define SPI_STATUS	(0xFF)		/* SPI Status Mask */
#define SPI_INTEN_TE	(0x1 << 8)	/* TXFIFO Empty Interrupt Enable */
#define SPI_INTEN_TH	(0x1 << 9)	/* TXFIFO Half Interrupt Enable */
#define SPI_INTEN_TF	(0x1 << 10)	/* TXFIFO Full Interrupt Enable */
#define SPI_INTEN_RE	(0x1 << 11)	/* RXFIFO Data Ready Interrupt Enable */
#define SPI_INTEN_RH	(0x1 << 12)	/* RXFIFO Half Interrupt Enable */
#define SPI_INTEN_RF	(0x1 << 13)	/* RXFIFO Full Interrupt Enable */
#define SPI_INTEN_RO	(0x1 << 14)	/* RXFIFO Overflow Interrupt Enable */
#define SPI_INTEN_BO	(0x1 << 15)	/* Bit Count Overflow Interrupt Enable */
#define SPI_INTEN	(0xFF << 8)	/* SPI Interrupt Enable Mask */

/* SPI Test Register Bit Fields & Masks */
#define SPI_TEST_TXCNT		(0xF << 0)	/* TXFIFO Counter */
#define SPI_TEST_RXCNT_LSB	(4)		/* RXFIFO Counter LSB */
#define SPI_TEST_RXCNT		(0xF << 4)	/* RXFIFO Counter */
#define SPI_TEST_SSTATUS	(0xF << 8)	/* State Machine Status */
#define SPI_TEST_LBC		(0x1 << 14)	/* Loop Back Control */

/* SPI Period Register Bit Fields & Masks */
#define SPI_PERIOD_WAIT		(0x7FFF << 0)	/* Wait Between Transactions */
#define SPI_PERIOD_MAX_WAIT	(0x7FFF)	/* Max Wait Between
						   Transactions */
#define SPI_PERIOD_CSRC		(0x1 << 15)	/* Period Clock Source Mask */
#define SPI_PERIOD_CSRC_BCLK	(0x0 << 15)	/* Period Clock Source is
						   Bit Clock */
#define SPI_PERIOD_CSRC_32768	(0x1 << 15)	/* Period Clock Source is
						   32.768 KHz Clock */

/* SPI DMA Register Bit Fields & Masks */
#define SPI_DMA_RHDMA	(0x1 << 4)	/* RXFIFO Half Status */
#define SPI_DMA_RFDMA	(0x1 << 5)	/* RXFIFO Full Status */
#define SPI_DMA_TEDMA	(0x1 << 6)	/* TXFIFO Empty Status */
#define SPI_DMA_THDMA	(0x1 << 7)	/* TXFIFO Half Status */
#define SPI_DMA_RHDEN	(0x1 << 12)	/* RXFIFO Half DMA Request Enable */
#define SPI_DMA_RFDEN	(0x1 << 13)	/* RXFIFO Full DMA Request Enable */
#define SPI_DMA_TEDEN	(0x1 << 14)	/* TXFIFO Empty DMA Request Enable */
#define SPI_DMA_THDEN	(0x1 << 15)	/* TXFIFO Half DMA Request Enable */

/* SPI Soft Reset Register Bit Fields & Masks */
#define SPI_RESET_START	(0x1)		/* Start */

/* Default SPI configuration values */
#define SPI_DEFAULT_CONTROL		\
(					\
	SPI_CONTROL_BITCOUNT(16) |	\
	SPI_CONTROL_POL_ACT_HIGH |	\
	SPI_CONTROL_PHA_0 |		\
	SPI_CONTROL_SPIEN |		\
	SPI_CONTROL_SSCTL_1 |		\
	SPI_CONTROL_MODE_MASTER |	\
	SPI_CONTROL_DRCTL_0 |		\
	SPI_CONTROL_DATARATE_MIN	\
)
#define SPI_DEFAULT_ENABLE_LOOPBACK	(0)
#define SPI_DEFAULT_ENABLE_DMA		(0)
#define SPI_DEFAULT_PERIOD_WAIT		(8)
/*-------------------------------------------------------------------------*/

/*-------------------------------------------------------------------------*/
/* TX/RX SPI FIFO size */
#define SPI_FIFO_DEPTH			(8)
#define SPI_FIFO_BYTE_WIDTH		(2)
#define SPI_FIFO_OVERFLOW_MARGIN	(2)

/* DMA burst length for half full/empty request trigger */
#define SPI_DMA_BLR			(SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2)
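
/*
 * Half of the 8-entry, 16-bit wide FIFO is 8 bytes; programming this as
 * the DMA burst length matches the "half full"/"half empty" request
 * triggers (SPI_DMA_RHDEN/SPI_DMA_THDEN) selected in pump_transfers().
 */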

/* Dummy char output to achieve reads.
   Choosing something different from all zeroes may help pattern recognition
   for oscilloscope analysis, but may break some drivers. */
#define SPI_DUMMY_u8	0
#define SPI_DUMMY_u16	((SPI_DUMMY_u8 << 8) | SPI_DUMMY_u8)
#define SPI_DUMMY_u32	((SPI_DUMMY_u16 << 16) | SPI_DUMMY_u16)

/**
 * Macro to change a u32 field:
 * @r : register to edit
 * @m : bit mask
 * @v : new value for the field, correctly bit-aligned
 */
#define u32_EDIT(r, m, v)	r = (r & ~(m)) | (v)
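
/*
 * For example, clamping an already-programmed control word to the slowest
 * data rate:
 *
 *	u32_EDIT(control, SPI_CONTROL_DATARATE, SPI_CONTROL_DATARATE_MIN);
 *
 * clears the 3-bit DATARATE field and ORs in the new value.
 */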

/* Message state */
#define START_STATE	((void*)0)
#define RUNNING_STATE	((void*)1)
#define DONE_STATE	((void*)2)
#define ERROR_STATE	((void*)-1)
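
/*
 * spi_message->state cycles through these opaque values: START_STATE on
 * queueing, RUNNING_STATE while pump_transfers() walks the transfer list,
 * then DONE_STATE or ERROR_STATE, at which point giveback() completes the
 * message.
 */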

/* Queue state */
#define QUEUE_RUNNING	(0)
#define QUEUE_STOPPED	(1)

#define IS_DMA_ALIGNED(x)	(((u32)(x) & 0x03) == 0)
/*-------------------------------------------------------------------------*/

/*-------------------------------------------------------------------------*/
/* Driver data structs */

/* Context */
struct driver_data {
	/* Driver model hookup */
	struct platform_device *pdev;

	/* SPI framework hookup */
	struct spi_master *master;

	/* IMX hookup */
	struct spi_imx_master *master_info;

	/* Memory resources and SPI regs virtual address */
	struct resource *ioarea;
	void __iomem *regs;

	/* SPI RX_DATA physical address */
	dma_addr_t rd_data_phys;

	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct work;
	spinlock_t lock;
	struct list_head queue;
	int busy;
	int run;

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message, transfer and state */
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;

	/* Rd / Wr buffers pointers */
	size_t len;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;

	u8 rd_only;
	u8 n_bytes;
	int cs_change;

	/* Function pointers */
	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
	void (*cs_control)(u32 command);

	/* DMA setup */
	int rx_channel;
	int tx_channel;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	int rx_dma_needs_unmap;
	int tx_dma_needs_unmap;
	size_t tx_map_len;
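
	/* Dummy word handed to dma_map_single() as the fixed source that
	   clocks out read-only transfers (see map_dma_buffers());
	   cacheline alignment presumably keeps the mapping DMA-safe */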
	u32 dummy_dma_buf ____cacheline_aligned;

	struct clk *clk;
};

/* Runtime state */
struct chip_data {
	u32 control;
	u32 period;
	u32 test;

	u8 enable_dma:1;
	u8 bits_per_word;
	u8 n_bytes;
	u32 max_speed_hz;

	void (*cs_control)(u32 command);
};
/*-------------------------------------------------------------------------*/

static void pump_messages(struct work_struct *work);

static void flush(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	u32 control;

	dev_dbg(&drv_data->pdev->dev, "flush\n");

	/* Wait for end of transaction */
	do {
		control = readl(regs + SPI_CONTROL);
	} while (control & SPI_CONTROL_XCH);

	/* Release chip select if requested, transfer delays are
	   handled in pump_transfers */
	if (drv_data->cs_change)
		drv_data->cs_control(SPI_CS_DEASSERT);

	/* Disable SPI to flush FIFOs */
	writel(control & ~SPI_CONTROL_SPIEN, regs + SPI_CONTROL);
	writel(control, regs + SPI_CONTROL);
}

static void restore_state(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	struct chip_data *chip = drv_data->cur_chip;

	/* Load chip registers */
	dev_dbg(&drv_data->pdev->dev,
		"restore_state\n"
		"    test    = 0x%08X\n"
		"    control = 0x%08X\n",
		chip->test,
		chip->control);
	writel(chip->test, regs + SPI_TEST);
	writel(chip->period, regs + SPI_PERIOD);
	writel(0, regs + SPI_INT_STATUS);
	writel(chip->control, regs + SPI_CONTROL);
}

static void null_cs_control(u32 command)
{
}

static inline u32 data_to_write(struct driver_data *drv_data)
{
	return ((u32)(drv_data->tx_end - drv_data->tx)) / drv_data->n_bytes;
}

static inline u32 data_to_read(struct driver_data *drv_data)
{
	return ((u32)(drv_data->rx_end - drv_data->rx)) / drv_data->n_bytes;
}
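
/*
 * Refill the TXFIFO from drv_data->tx (or with dummy words on read-only
 * transfers), then set XCH to kick the exchange.  When receiving, a
 * SPI_FIFO_OVERFLOW_MARGIN slack is kept so the RXFIFO cannot overflow.
 * Returns non-zero once every word of the transfer has been queued.
 */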
static int write(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	void *tx = drv_data->tx;
	void *tx_end = drv_data->tx_end;
	u8 n_bytes = drv_data->n_bytes;
	u32 remaining_writes;
	u32 fifo_avail_space;
	u32 n;
	u16 d;

	/* Compute how many fifo writes to do */
	remaining_writes = (u32)(tx_end - tx) / n_bytes;
	fifo_avail_space = SPI_FIFO_DEPTH -
				(readl(regs + SPI_TEST) & SPI_TEST_TXCNT);
	if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN))
		/* Leave some margin to avoid a receive overflow */
		fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN;
	n = min(remaining_writes, fifo_avail_space);

	dev_dbg(&drv_data->pdev->dev,
		"write type %s\n"
		"    remaining writes = %d\n"
		"    fifo avail space = %d\n"
		"    fifo writes      = %d\n",
		(n_bytes == 1) ? "u8" : "u16",
		remaining_writes,
		fifo_avail_space,
		n);

	if (n > 0) {
		/* Fill SPI TXFIFO */
		if (drv_data->rd_only) {
			tx += n * n_bytes;
			while (n--)
				writel(SPI_DUMMY_u16, regs + SPI_TXDATA);
		} else {
			if (n_bytes == 1) {
				while (n--) {
					d = *(u8 *)tx;
					writel(d, regs + SPI_TXDATA);
					tx += 1;
				}
			} else {
				while (n--) {
					d = *(u16 *)tx;
					writel(d, regs + SPI_TXDATA);
					tx += 2;
				}
			}
		}

		/* Trigger transfer */
		writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
			regs + SPI_CONTROL);

		/* Update tx pointer */
		drv_data->tx = tx;
	}

	return (tx >= tx_end);
}

static int read(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	void *rx = drv_data->rx;
	void *rx_end = drv_data->rx_end;
	u8 n_bytes = drv_data->n_bytes;
	u32 remaining_reads;
	u32 fifo_rxcnt;
	u32 n;
	u16 d;

	/* Compute how many fifo reads to do */
	remaining_reads = (u32)(rx_end - rx) / n_bytes;
	fifo_rxcnt = (readl(regs + SPI_TEST) & SPI_TEST_RXCNT) >>
			SPI_TEST_RXCNT_LSB;
	n = min(remaining_reads, fifo_rxcnt);

	dev_dbg(&drv_data->pdev->dev,
		"read type %s\n"
		"    remaining reads = %d\n"
		"    fifo rx count   = %d\n"
		"    fifo reads      = %d\n",
		(n_bytes == 1) ? "u8" : "u16",
		remaining_reads,
		fifo_rxcnt,
		n);

	if (n > 0) {
		/* Read SPI RXFIFO */
		if (n_bytes == 1) {
			while (n--) {
				d = readl(regs + SPI_RXDATA);
				*((u8 *)rx) = d;
				rx += 1;
			}
		} else {
			while (n--) {
				d = readl(regs + SPI_RXDATA);
				*((u16 *)rx) = d;
				rx += 2;
			}
		}

		/* Update rx pointer */
		drv_data->rx = rx;
	}

	return (rx >= rx_end);
}

static void *next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	}

	return DONE_STATE;
}
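
/*
 * Returns 0 when the current transfer can be done by DMA, -1 to fall back
 * to PIO.  Three cases are handled: buffers pre-mapped by the caller
 * (msg->is_dma_mapped), read-only transfers clocked by dummy writes from
 * dummy_dma_buf, and buffers mapped here (which later need unmapping).
 */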
static int map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg;
	struct device *dev;
	void *buf;

	drv_data->rx_dma_needs_unmap = 0;
	drv_data->tx_dma_needs_unmap = 0;

	if (!drv_data->master_info->enable_dma ||
	    !drv_data->cur_chip->enable_dma)
		return -1;

	msg = drv_data->cur_msg;
	dev = &msg->spi->dev;
	if (msg->is_dma_mapped) {
		if (drv_data->tx_dma)
			/* The caller provided at least dma and cpu virtual
			   address for write; pump_transfers() will consider
			   the transfer as write only if cpu rx virtual
			   address is NULL */
			return 0;

		if (drv_data->rx_dma) {
			/* The caller provided dma and cpu virtual address to
			   perform a read only transfer -->
			   use drv_data->dummy_dma_buf for dummy writes to
			   achieve reads */
			buf = &drv_data->dummy_dma_buf;
			drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
			drv_data->tx_dma = dma_map_single(dev,
							buf,
							drv_data->tx_map_len,
							DMA_TO_DEVICE);
			if (dma_mapping_error(dev, drv_data->tx_dma))
				return -1;
			drv_data->tx_dma_needs_unmap = 1;

			/* Flags transfer as rd_only for pump_transfers() DMA
			   regs programming (should be redundant) */
			drv_data->tx = NULL;

			return 0;
		}
	}

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return -1;

	if (drv_data->tx == NULL) {
		/* Read only message --> use drv_data->dummy_dma_buf for dummy
		   writes to achieve reads */
		buf = &drv_data->dummy_dma_buf;
		drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
	} else {
		buf = drv_data->tx;
		drv_data->tx_map_len = drv_data->len;
	}
	drv_data->tx_dma = dma_map_single(dev,
					buf,
					drv_data->tx_map_len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return -1;
	drv_data->tx_dma_needs_unmap = 1;

	/* NULL rx means write-only transfer and no map needed
	   since rx DMA will not be used */
	if (drv_data->rx) {
		buf = drv_data->rx;
		drv_data->rx_dma = dma_map_single(dev,
						buf,
						drv_data->len,
						DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, drv_data->rx_dma)) {
			if (drv_data->tx_dma) {
				dma_unmap_single(dev,
						drv_data->tx_dma,
						drv_data->tx_map_len,
						DMA_TO_DEVICE);
				drv_data->tx_dma_needs_unmap = 0;
			}
			return -1;
		}
		drv_data->rx_dma_needs_unmap = 1;
	}

	return 0;
}

static void unmap_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (drv_data->rx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->rx_dma,
				drv_data->len,
				DMA_FROM_DEVICE);
		drv_data->rx_dma_needs_unmap = 0;
	}
	if (drv_data->tx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->tx_dma,
				drv_data->tx_map_len,
				DMA_TO_DEVICE);
		drv_data->tx_dma_needs_unmap = 0;
	}
}

/* Caller already set message->status (dma is already blocked) */
static void giveback(struct spi_message *message, struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;

	/* Bring SPI to sleep; restore_state() and pump_transfer()
	   will do new setup */
	writel(0, regs + SPI_INT_STATUS);
	writel(0, regs + SPI_DMA);

	/* Unconditional chip select deassert */
	drv_data->cs_control(SPI_CS_DEASSERT);

	message->state = NULL;
	if (message->complete)
		message->complete(message->context);

	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	queue_work(drv_data->workqueue, &drv_data->work);
}

static void dma_err_handler(int channel, void *data, int errcode)
{
	struct driver_data *drv_data = data;
	struct spi_message *msg = drv_data->cur_msg;

	dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n");

	/* Disable both rx and tx dma channels */
	imx_dma_disable(drv_data->rx_channel);
	imx_dma_disable(drv_data->tx_channel);
	unmap_dma_buffers(drv_data);
	flush(drv_data);
	msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_tx_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;

	dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n");

	imx_dma_disable(channel);

	/* Now wait until the TXFIFO is empty */
	writel(SPI_INTEN_TE, drv_data->regs + SPI_INT_STATUS);
}
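
/*
 * Completion handler for DMA transfers.  DMA requests fire on the
 * half-FIFO watermarks, so once the TXFIFO Empty interrupt arrives up to
 * half a RXFIFO of trailing words may still be pending; they are counted
 * via SPI_TEST_RXCNT and drained by PIO with read().
 */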
static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
	u32 status;
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;

	status = readl(regs + SPI_INT_STATUS);

	if ((status & (SPI_INTEN_RO | SPI_STATUS_RO))
			== (SPI_INTEN_RO | SPI_STATUS_RO)) {
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);

		imx_dma_disable(drv_data->tx_channel);
		imx_dma_disable(drv_data->rx_channel);
		unmap_dma_buffers(drv_data);

		flush(drv_data);

		dev_warn(&drv_data->pdev->dev,
			"dma_transfer - fifo overrun\n");

		msg->state = ERROR_STATE;
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	}

	if (status & SPI_STATUS_TE) {
		writel(status & ~SPI_INTEN_TE, regs + SPI_INT_STATUS);

		if (drv_data->rx) {
			/* Wait for end of transfer before reading
			   trailing data */
			while (readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH)
				cpu_relax();

			imx_dma_disable(drv_data->rx_channel);
			unmap_dma_buffers(drv_data);

			/* Release chip select if requested, transfer delays
			   are handled in pump_transfers() */
			if (drv_data->cs_change)
				drv_data->cs_control(SPI_CS_DEASSERT);

			/* Calculate the number of trailing words and read
			   them */
			dev_dbg(&drv_data->pdev->dev,
				"dma_transfer - test = 0x%08X\n",
				readl(regs + SPI_TEST));
			drv_data->rx = drv_data->rx_end -
					((readl(regs + SPI_TEST) &
					SPI_TEST_RXCNT) >>
					SPI_TEST_RXCNT_LSB) * drv_data->n_bytes;
			read(drv_data);
		} else {
			/* Write only transfer */
			unmap_dma_buffers(drv_data);

			flush(drv_data);
		}

		/* End of transfer, update total bytes transferred */
		msg->actual_length += drv_data->len;

		/* Move to next transfer */
		msg->state = next_transfer(drv_data);

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;
	u32 status;
	irqreturn_t handled = IRQ_NONE;

	status = readl(regs + SPI_INT_STATUS);

	if (status & SPI_INTEN_TE) {
		/* TXFIFO Empty Interrupt on the last transferred word */
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
		dev_dbg(&drv_data->pdev->dev,
			"interrupt_wronly_transfer - end of tx\n");

		flush(drv_data);

		/* Update total bytes transferred */
		msg->actual_length += drv_data->len;

		/* Move to next transfer */
		msg->state = next_transfer(drv_data);

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	} else {
		while (status & SPI_STATUS_TH) {
			dev_dbg(&drv_data->pdev->dev,
				"interrupt_wronly_transfer - status = 0x%08X\n",
				status);

			/* Pump data */
			if (write(drv_data)) {
				/* End of TXFIFO writes,
				   now wait until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				return IRQ_HANDLED;
			}

			status = readl(regs + SPI_INT_STATUS);

			/* We did something */
			handled = IRQ_HANDLED;
		}
	}

	return handled;
}
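
/*
 * PIO handler for transfers with a receive side: TXFIFO Half interrupts
 * keep both FIFOs pumped, a RXFIFO Overflow aborts the message, and the
 * final TXFIFO Empty interrupt drains whatever the RXFIFO still holds.
 */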
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;
	u32 status, control;
	irqreturn_t handled = IRQ_NONE;
	unsigned long limit;

	status = readl(regs + SPI_INT_STATUS);

	if (status & SPI_INTEN_TE) {
		/* TXFIFO Empty Interrupt on the last transferred word */
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
		dev_dbg(&drv_data->pdev->dev,
			"interrupt_transfer - end of tx\n");

		if (msg->state == ERROR_STATE) {
			/* RXFIFO overrun was detected and message aborted */
			flush(drv_data);
		} else {
			/* Wait for end of transaction */
			do {
				control = readl(regs + SPI_CONTROL);
			} while (control & SPI_CONTROL_XCH);

			/* Release chip select if requested, transfer delays
			   are handled in pump_transfers */
			if (drv_data->cs_change)
				drv_data->cs_control(SPI_CS_DEASSERT);

			/* Read trailing bytes */
			limit = loops_per_jiffy << 1;
			while ((read(drv_data) == 0) && --limit)
				;
			if (limit == 0)
				dev_err(&drv_data->pdev->dev,
					"interrupt_transfer - "
					"trailing byte read failed\n");
			else
				dev_dbg(&drv_data->pdev->dev,
					"interrupt_transfer - end of rx\n");

			/* Update total bytes transferred */
			msg->actual_length += drv_data->len;

			/* Move to next transfer */
			msg->state = next_transfer(drv_data);
		}

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	} else {
		while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
			dev_dbg(&drv_data->pdev->dev,
				"interrupt_transfer - status = 0x%08X\n",
				status);

			if (status & SPI_STATUS_RO) {
				/* RXFIFO overrun, abort message and wait
				   until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				dev_warn(&drv_data->pdev->dev,
					"interrupt_transfer - fifo overrun\n"
					"    data not yet written = %d\n"
					"    data not yet read    = %d\n",
					data_to_write(drv_data),
					data_to_read(drv_data));

				msg->state = ERROR_STATE;

				return IRQ_HANDLED;
			}

			/* Pump data */
			read(drv_data);
			if (write(drv_data)) {
				/* End of TXFIFO writes,
				   now wait until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				return IRQ_HANDLED;
			}

			status = readl(regs + SPI_INT_STATUS);

			/* We did something */
			handled = IRQ_HANDLED;
		}
	}

	return handled;
}

static irqreturn_t spi_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = (struct driver_data *)dev_id;

	if (!drv_data->cur_msg) {
		dev_err(&drv_data->pdev->dev,
			"spi_int - bad message state\n");
		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

static inline u32 spi_speed_hz(struct driver_data *drv_data, u32 data_rate)
{
	return clk_get_rate(drv_data->clk) / (4 << ((data_rate) >> 13));
}
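
/*
 * Pick the smallest divider whose resulting speed does not exceed the
 * requested speed_hz; quantized_hz starts at perclk2/4 and halves on
 * each step.  For example (illustrative figures), with a 48 MHz perclk2
 * a 1 MHz request yields div = 4, i.e. 48 MHz / 64 = 750 kHz.  Returns
 * the ready-to-use DATARATE field value, or SPI_CONTROL_DATARATE_BAD
 * when even perclk2/512 is still faster than the request.
 */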
static u32 spi_data_rate(struct driver_data *drv_data, u32 speed_hz)
{
	u32 div;
	u32 quantized_hz = clk_get_rate(drv_data->clk) >> 2;

	for (div = SPI_PERCLK2_DIV_MIN;
		div <= SPI_PERCLK2_DIV_MAX;
		div++, quantized_hz >>= 1) {
		if (quantized_hz <= speed_hz)
			/* Max available speed less than or equal to the
			   requested speed */
			return div << 13;
	}
	return SPI_CONTROL_DATARATE_BAD;
}

static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message;
	struct spi_transfer *transfer, *previous;
	struct chip_data *chip;
	void __iomem *regs;
	u32 tmp, control;

	dev_dbg(&drv_data->pdev->dev, "pump_transfer\n");

	message = drv_data->cur_msg;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(message, drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(message, drv_data);
		return;
	}

	chip = drv_data->cur_chip;

	/* Delay if requested at end of transfer */
	transfer = drv_data->cur_transfer;
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	} else {
		/* START_STATE */
		message->state = RUNNING_STATE;
		drv_data->cs_control = chip->cs_control;
	}

	transfer = drv_data->cur_transfer;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len;
	drv_data->cs_change = transfer->cs_change;
	drv_data->rd_only = (drv_data->tx == NULL);

	regs = drv_data->regs;
	control = readl(regs + SPI_CONTROL);

	/* Bits per word setup */
	tmp = transfer->bits_per_word;
	if (tmp == 0) {
		/* Use device setup */
		tmp = chip->bits_per_word;
		drv_data->n_bytes = chip->n_bytes;
	} else
		/* Use per-transfer setup */
		drv_data->n_bytes = (tmp <= 8) ? 1 : 2;
	u32_EDIT(control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);

	/* Speed setup (surely valid because already checked) */
	tmp = transfer->speed_hz;
	if (tmp == 0)
		tmp = chip->max_speed_hz;
	tmp = spi_data_rate(drv_data, tmp);
	u32_EDIT(control, SPI_CONTROL_DATARATE, tmp);

	writel(control, regs + SPI_CONTROL);

	/* Assert device chip-select */
	drv_data->cs_control(SPI_CS_ASSERT);

	/* DMA cannot read/write the SPI FIFOs other than 16 bits at a time,
	   so PIO transfers are performed when bits_per_word is 8 or less.
	   Moreover, DMA only pays off for transfers longer than the FIFO
	   size in bytes. */
	if ((drv_data->n_bytes == 2) &&
		(drv_data->len > SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH) &&
		(map_dma_buffers(drv_data) == 0)) {
		dev_dbg(&drv_data->pdev->dev,
			"pump dma transfer\n"
			"    tx     = %p\n"
			"    tx_dma = %08X\n"
			"    rx     = %p\n"
			"    rx_dma = %08X\n"
			"    len    = %d\n",
			drv_data->tx,
			(unsigned int)drv_data->tx_dma,
			drv_data->rx,
			(unsigned int)drv_data->rx_dma,
			drv_data->len);

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = dma_transfer;

		/* Trigger transfer */
		writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
			regs + SPI_CONTROL);

		/* Setup tx DMA */
		if (drv_data->tx)
			/* Linear source address */
			CCR(drv_data->tx_channel) =
				CCR_DMOD_FIFO |
				CCR_SMOD_LINEAR |
				CCR_SSIZ_32 | CCR_DSIZ_16 |
				CCR_REN;
		else
			/* Read only transfer -> fixed source address for
			   dummy write to achieve read */
			CCR(drv_data->tx_channel) =
				CCR_DMOD_FIFO |
				CCR_SMOD_FIFO |
				CCR_SSIZ_32 | CCR_DSIZ_16 |
				CCR_REN;

		imx_dma_setup_single(
			drv_data->tx_channel,
			drv_data->tx_dma,
			drv_data->len,
			drv_data->rd_data_phys + 4,
			DMA_MODE_WRITE);

		if (drv_data->rx) {
			/* Setup rx DMA for linear destination address */
			CCR(drv_data->rx_channel) =
				CCR_DMOD_LINEAR |
				CCR_SMOD_FIFO |
				CCR_DSIZ_32 | CCR_SSIZ_16 |
				CCR_REN;
			imx_dma_setup_single(
				drv_data->rx_channel,
				drv_data->rx_dma,
				drv_data->len,
				drv_data->rd_data_phys,
				DMA_MODE_READ);
			imx_dma_enable(drv_data->rx_channel);

			/* Enable SPI interrupt */
			writel(SPI_INTEN_RO, regs + SPI_INT_STATUS);

			/* Set SPI to request DMA service on both
			   Rx and Tx half fifo watermark */
			writel(SPI_DMA_RHDEN | SPI_DMA_THDEN, regs + SPI_DMA);
		} else
			/* Write only access -> set SPI to request DMA
			   service on Tx half fifo watermark */
			writel(SPI_DMA_THDEN, regs + SPI_DMA);

		imx_dma_enable(drv_data->tx_channel);
	} else {
		dev_dbg(&drv_data->pdev->dev,
			"pump pio transfer\n"
			"    tx  = %p\n"
			"    rx  = %p\n"
			"    len = %d\n",
			drv_data->tx,
			drv_data->rx,
			drv_data->len);

		/* Ensure we have the correct interrupt handler */
		if (drv_data->rx)
			drv_data->transfer_handler = interrupt_transfer;
		else
			drv_data->transfer_handler = interrupt_wronly_transfer;

		/* Enable SPI interrupt */
		if (drv_data->rx)
			writel(SPI_INTEN_TH | SPI_INTEN_RO,
				regs + SPI_INT_STATUS);
		else
			writel(SPI_INTEN_TH, regs + SPI_INT_STATUS);
	}
}
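
/*
 * Message pump: runs in process context off the driver workqueue.  It
 * pops the next spi_message, restores the per-chip register state, and
 * hands off to the pump_transfers tasklet; transfer completion then
 * bounces between IRQ context and the tasklet until giveback() is called.
 */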
static void pump_messages(struct work_struct *work)
{
	struct driver_data *drv_data =
		container_of(work, struct driver_data, work);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
		drv_data->busy = 0;
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Extract head of queue */
	drv_data->cur_msg = list_entry(drv_data->queue.next,
					struct spi_message, queue);
	list_del_init(&drv_data->cur_msg->queue);
	drv_data->busy = 1;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* Setup the SPI using the per chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
	restore_state(drv_data);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
}

static int transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	u32 min_speed_hz, max_speed_hz, tmp;
	struct spi_transfer *trans;
	unsigned long flags;

	msg->actual_length = 0;

	/* Per transfer setup check */
	min_speed_hz = spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN);
	max_speed_hz = spi->max_speed_hz;
	list_for_each_entry(trans, &msg->transfers, transfer_list) {
		tmp = trans->bits_per_word;
		if (tmp > 16) {
			dev_err(&drv_data->pdev->dev,
				"message rejected : "
				"invalid transfer bits_per_word (%d bits)\n",
				tmp);
			goto msg_rejected;
		}
		tmp = trans->speed_hz;
		if (tmp) {
			if (tmp < min_speed_hz) {
				dev_err(&drv_data->pdev->dev,
					"message rejected : "
					"device min speed (%d Hz) exceeds "
					"required transfer speed (%d Hz)\n",
					min_speed_hz,
					tmp);
				goto msg_rejected;
			} else if (tmp > max_speed_hz) {
				dev_err(&drv_data->pdev->dev,
					"message rejected : "
					"transfer speed (%d Hz) exceeds "
					"device max speed (%d Hz)\n",
					tmp,
					max_speed_hz);
				goto msg_rejected;
			}
		}
	}

	/* Message accepted */
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	spin_lock_irqsave(&drv_data->lock, flags);
	if (drv_data->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -ESHUTDOWN;
	}

	list_add_tail(&msg->queue, &drv_data->queue);
	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->work);
	spin_unlock_irqrestore(&drv_data->lock, flags);

	return 0;

msg_rejected:
	/* Message rejected and not queued */
	msg->status = -EINVAL;
	msg->state = ERROR_STATE;
	if (msg->complete)
		msg->complete(msg->context);
	return -EINVAL;
}

/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)

/* On first setup, bad values must free the chip_data memory since they will
   cause spi_new_device() to fail.  Bad setup values from the protocol driver
   are simply not applied and are reported back to the calling driver. */
static int setup(struct spi_device *spi)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	struct spi_imx_chip *chip_info;
	struct chip_data *chip;
	int first_setup = 0;
	u32 tmp;
	int status = 0;

	if (spi->mode & ~MODEBITS) {
		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
			spi->mode & ~MODEBITS);
		return -EINVAL;
	}

	/* Get controller data */
	chip_info = spi->controller_data;

	/* Get controller_state */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		first_setup = 1;

		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"setup - cannot allocate controller state\n");
			return -ENOMEM;
		}
		chip->control = SPI_DEFAULT_CONTROL;

		if (chip_info == NULL) {
			/* spi_board_info.controller_data is not supplied */
			chip_info = kzalloc(sizeof(struct spi_imx_chip),
						GFP_KERNEL);
			if (!chip_info) {
				dev_err(&spi->dev,
					"setup - "
					"cannot allocate controller data\n");
				status = -ENOMEM;
				goto err_first_setup;
			}
			/* Set controller data default value */
			chip_info->enable_loopback =
						SPI_DEFAULT_ENABLE_LOOPBACK;
			chip_info->enable_dma = SPI_DEFAULT_ENABLE_DMA;
			chip_info->ins_ss_pulse = 1;
			chip_info->bclk_wait = SPI_DEFAULT_PERIOD_WAIT;
			chip_info->cs_control = null_cs_control;
		}
	}

	/* Now set controller state based on controller data */

	if (first_setup) {
		/* SPI loopback */
		if (chip_info->enable_loopback)
			chip->test = SPI_TEST_LBC;
		else
			chip->test = 0;

		/* SPI dma driven */
		chip->enable_dma = chip_info->enable_dma;

		/* SPI /SS pulse between spi bursts */
		if (chip_info->ins_ss_pulse)
			u32_EDIT(chip->control,
				SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_1);
		else
			u32_EDIT(chip->control,
				SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_0);

		/* SPI bclk waits between each bits_per_word spi burst */
		if (chip_info->bclk_wait > SPI_PERIOD_MAX_WAIT) {
			dev_err(&spi->dev,
				"setup - "
				"bclk_wait exceeds max allowed (%d)\n",
				SPI_PERIOD_MAX_WAIT);
			status = -EINVAL;
			goto err_first_setup;
		}
		chip->period = SPI_PERIOD_CSRC_BCLK |
				(chip_info->bclk_wait & SPI_PERIOD_WAIT);
	}

	/* SPI mode */
	tmp = spi->mode;
	if (tmp & SPI_CS_HIGH) {
		u32_EDIT(chip->control,
			SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
	}
	switch (tmp & SPI_MODE_3) {
	case SPI_MODE_0:
		tmp = 0;
		break;
	case SPI_MODE_1:
		tmp = SPI_CONTROL_PHA_1;
		break;
	case SPI_MODE_2:
		tmp = SPI_CONTROL_POL_ACT_LOW;
		break;
	default:
		/* SPI_MODE_3 */
		tmp = SPI_CONTROL_PHA_1 | SPI_CONTROL_POL_ACT_LOW;
		break;
	}
	u32_EDIT(chip->control, SPI_CONTROL_POL | SPI_CONTROL_PHA, tmp);

	/* SPI word width */
	tmp = spi->bits_per_word;
	if (tmp == 0) {
		tmp = 8;
		spi->bits_per_word = 8;
	} else if (tmp > 16) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"setup - "
			"invalid bits_per_word (%d)\n",
			tmp);
		if (first_setup)
			goto err_first_setup;
		else {
			/* Undo setup using chip as backup copy */
			tmp = chip->bits_per_word;
			spi->bits_per_word = tmp;
		}
	}
	chip->bits_per_word = tmp;
	u32_EDIT(chip->control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
	chip->n_bytes = (tmp <= 8) ? 1 : 2;

	/* SPI datarate */
	tmp = spi_data_rate(drv_data, spi->max_speed_hz);
	if (tmp == SPI_CONTROL_DATARATE_BAD) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"setup - "
			"HW min speed (%d Hz) exceeds required "
			"max speed (%d Hz)\n",
			spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
			spi->max_speed_hz);
		if (first_setup)
			goto err_first_setup;
		else
			/* Undo setup using chip as backup copy */
			spi->max_speed_hz = chip->max_speed_hz;
	} else {
		u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp);
		/* Actual rounded max_speed_hz */
		tmp = spi_speed_hz(drv_data, tmp);
		spi->max_speed_hz = tmp;
		chip->max_speed_hz = tmp;
	}

	/* SPI chip-select management */
	if (chip_info->cs_control)
		chip->cs_control = chip_info->cs_control;
	else
		chip->cs_control = null_cs_control;

	/* Save controller_state */
	spi_set_ctldata(spi, chip);

	/* Summary */
	dev_dbg(&spi->dev,
		"setup succeeded\n"
		"    loopback enable   = %s\n"
		"    dma enable        = %s\n"
		"    insert /ss pulse  = %s\n"
		"    period wait       = %d\n"
		"    mode              = %d\n"
		"    bits per word     = %d\n"
		"    min speed         = %d Hz\n"
		"    rounded max speed = %d Hz\n",
		chip->test & SPI_TEST_LBC ? "Yes" : "No",
		chip->enable_dma ? "Yes" : "No",
		chip->control & SPI_CONTROL_SSCTL ? "Yes" : "No",
		chip->period & SPI_PERIOD_WAIT,
		spi->mode,
		spi->bits_per_word,
		spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
		spi->max_speed_hz);
	return status;

err_first_setup:
	kfree(chip);
	return status;
}

static void cleanup(struct spi_device *spi)
{
	kfree(spi_get_ctldata(spi));
}

static int __init init_queue(struct driver_data *drv_data)
{
	INIT_LIST_HEAD(&drv_data->queue);
	spin_lock_init(&drv_data->lock);

	drv_data->run = QUEUE_STOPPED;
	drv_data->busy = 0;

	tasklet_init(&drv_data->pump_transfers,
			pump_transfers, (unsigned long)drv_data);

	INIT_WORK(&drv_data->work, pump_messages);
	drv_data->workqueue = create_singlethread_workqueue(
				dev_name(drv_data->master->dev.parent));
	if (drv_data->workqueue == NULL)
		return -EBUSY;

	return 0;
}

static int start_queue(struct driver_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -EBUSY;
	}

	drv_data->run = QUEUE_RUNNING;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	queue_work(drv_data->workqueue, &drv_data->work);

	return 0;
}

static int stop_queue(struct driver_data *drv_data)
{
	unsigned long flags;
	unsigned limit = 500;
	int status = 0;

	spin_lock_irqsave(&drv_data->lock, flags);

	/* This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the drv_data->busy could be used, but then the
	 * common execution path (pump_messages) would be required to call
	 * wake_up or friends on every SPI message. Do this instead */
	drv_data->run = QUEUE_STOPPED;
	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		msleep(10);
		spin_lock_irqsave(&drv_data->lock, flags);
	}

	if (!list_empty(&drv_data->queue) || drv_data->busy)
		status = -EBUSY;

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return status;
}

static int destroy_queue(struct driver_data *drv_data)
{
	int status;

	status = stop_queue(drv_data);
	if (status != 0)
		return status;

	if (drv_data->workqueue)
		destroy_workqueue(drv_data->workqueue);

	return 0;
}

static int __init spi_imx_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_imx_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct resource *res;
	int irq, status = 0;

	platform_info = dev->platform_data;
	if (platform_info == NULL) {
		dev_err(&pdev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for drv_data */
	master = spi_alloc_master(dev, sizeof(struct driver_data));
	if (!master) {
		dev_err(&pdev->dev, "probe - cannot alloc spi_master\n");
		status = -ENOMEM;
		goto err_no_mem;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;

	master->bus_num = pdev->id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer = transfer;

	drv_data->dummy_dma_buf = SPI_DUMMY_u32;

	drv_data->clk = clk_get(&pdev->dev, "perclk2");
	if (IS_ERR(drv_data->clk)) {
		dev_err(&pdev->dev, "probe - cannot get clock\n");
		status = PTR_ERR(drv_data->clk);
		goto err_no_clk;
	}
	clk_enable(drv_data->clk);

	/* Find and map resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "probe - MEM resources not defined\n");
		status = -ENODEV;
		goto err_no_iores;
	}
	drv_data->ioarea = request_mem_region(res->start,
						res->end - res->start + 1,
						pdev->name);
	if (drv_data->ioarea == NULL) {
		dev_err(&pdev->dev, "probe - cannot reserve region\n");
		status = -ENXIO;
		goto err_no_iores;
	}
	drv_data->regs = ioremap(res->start, res->end - res->start + 1);
	if (drv_data->regs == NULL) {
		dev_err(&pdev->dev, "probe - cannot map IO\n");
		status = -ENXIO;
		goto err_no_iomap;
	}
	drv_data->rd_data_phys = (dma_addr_t)res->start;

	/* Attach to IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "probe - IRQ resource not defined\n");
		status = -ENODEV;
		goto err_no_irqres;
	}
	status = request_irq(irq, spi_int, IRQF_DISABLED,
				dev_name(dev), drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irqres;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {
		/* Get rx DMA channel */
		drv_data->rx_channel = imx_dma_request_by_prio("spi_imx_rx",
								DMA_PRIO_HIGH);
		if (drv_data->rx_channel < 0) {
			dev_err(dev,
				"probe - problem (%d) requesting rx channel\n",
				drv_data->rx_channel);
			goto err_no_rxdma;
		} else
			imx_dma_setup_handlers(drv_data->rx_channel, NULL,
						dma_err_handler, drv_data);

		/* Get tx DMA channel */
		drv_data->tx_channel = imx_dma_request_by_prio("spi_imx_tx",
								DMA_PRIO_MEDIUM);
		if (drv_data->tx_channel < 0) {
			dev_err(dev,
				"probe - problem (%d) requesting tx channel\n",
				drv_data->tx_channel);
			imx_dma_free(drv_data->rx_channel);
			goto err_no_txdma;
		} else
			imx_dma_setup_handlers(drv_data->tx_channel,
						dma_tx_handler,
						dma_err_handler,
						drv_data);

		/* Set request source and burst length for allocated channels */
		switch (drv_data->pdev->id) {
		case 1:
			/* Using SPI1 */
			RSSR(drv_data->rx_channel) = DMA_REQ_SPI1_R;
			RSSR(drv_data->tx_channel) = DMA_REQ_SPI1_T;
			break;
		case 2:
			/* Using SPI2 */
			RSSR(drv_data->rx_channel) = DMA_REQ_SPI2_R;
			RSSR(drv_data->tx_channel) = DMA_REQ_SPI2_T;
			break;
		default:
			dev_err(dev, "probe - bad SPI Id\n");
			imx_dma_free(drv_data->rx_channel);
			imx_dma_free(drv_data->tx_channel);
			status = -ENODEV;
			goto err_no_devid;
		}
		BLR(drv_data->rx_channel) = SPI_DMA_BLR;
		BLR(drv_data->tx_channel) = SPI_DMA_BLR;
	}

	/* Load default SPI configuration */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);
	writel(SPI_DEFAULT_CONTROL, drv_data->regs + SPI_CONTROL);

	/* Initialize and start queue */
	status = init_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem initializing queue\n");
		goto err_init_queue;
	}
	status = start_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem starting queue\n");
		goto err_start_queue;
	}

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem registering spi master\n");
		goto err_spi_register;
	}

	dev_dbg(dev, "probe succeeded\n");
	return 0;

err_init_queue:
err_start_queue:
err_spi_register:
	destroy_queue(drv_data);

err_no_rxdma:
err_no_txdma:
err_no_devid:
	free_irq(irq, drv_data);

err_no_irqres:
	iounmap(drv_data->regs);

err_no_iomap:
	release_resource(drv_data->ioarea);
	kfree(drv_data->ioarea);

err_no_iores:
	clk_disable(drv_data->clk);
	clk_put(drv_data->clk);

err_no_clk:
	spi_master_put(master);

err_no_pdata:
err_no_mem:
	return status;
}

static int __exit spi_imx_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int irq;
	int status = 0;

	if (!drv_data)
		return 0;

	tasklet_kill(&drv_data->pump_transfers);

	/* Remove the queue */
	status = destroy_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "queue remove failed (%d)\n", status);
		return status;
	}

	/* Reset SPI */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);

	/* Release DMA */
	if (drv_data->master_info->enable_dma) {
		RSSR(drv_data->rx_channel) = 0;
		RSSR(drv_data->tx_channel) = 0;
		imx_dma_free(drv_data->tx_channel);
		imx_dma_free(drv_data->rx_channel);
	}

	/* Release IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, drv_data);

	clk_disable(drv_data->clk);
	clk_put(drv_data->clk);

	/* Release map resources */
	iounmap(drv_data->regs);
	release_resource(drv_data->ioarea);
	kfree(drv_data->ioarea);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);
	spi_master_put(drv_data->master);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "remove succeeded\n");

	return 0;
}

static void spi_imx_shutdown(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);

	/* Reset SPI */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);

	dev_dbg(&pdev->dev, "shutdown succeeded\n");
}

#ifdef CONFIG_PM
static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	status = stop_queue(drv_data);
	if (status != 0) {
		dev_warn(&pdev->dev, "suspend cannot stop queue\n");
		return status;
	}

	dev_dbg(&pdev->dev, "suspended\n");

	return 0;
}

static int spi_imx_resume(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	/* Start the queue running */
	status = start_queue(drv_data);
	if (status != 0)
		dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
	else
		dev_dbg(&pdev->dev, "resumed\n");

	return status;
}
#else
#define spi_imx_suspend NULL
#define spi_imx_resume NULL
#endif /* CONFIG_PM */

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:spi_imx");

static struct platform_driver driver = {
	.driver = {
		.name = "spi_imx",
		.owner = THIS_MODULE,
	},
	.remove = __exit_p(spi_imx_remove),
	.shutdown = spi_imx_shutdown,
	.suspend = spi_imx_suspend,
	.resume = spi_imx_resume,
};

static int __init spi_imx_init(void)
{
	return platform_driver_probe(&driver, spi_imx_probe);
}
module_init(spi_imx_init);

static void __exit spi_imx_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(spi_imx_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("iMX SPI Controller Driver");
MODULE_LICENSE("GPL");