/*
 * drivers/spi/spi_imx.c
 *
 * Copyright (C) 2006 SWAPP
 *	Andrea Paterniani <a.paterniani@swapp-eng.it>
 *
 * Initial version inspired by:
 *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>

#include <mach/hardware.h>
#include <mach/imx-dma.h>
#include <mach/spi_imx.h>

/*-------------------------------------------------------------------------*/
/* SPI Registers offsets from peripheral base address */
#define SPI_RXDATA		(0x00)
#define SPI_TXDATA		(0x04)
#define SPI_CONTROL		(0x08)
#define SPI_INT_STATUS		(0x0C)
#define SPI_TEST		(0x10)
#define SPI_PERIOD		(0x14)
#define SPI_DMA			(0x18)
#define SPI_RESET		(0x1C)

/* SPI Control Register Bit Fields & Masks */
#define SPI_CONTROL_BITCOUNT_MASK	(0xF)		/* Bit Count Mask */
#define SPI_CONTROL_BITCOUNT(n)		(((n) - 1) & SPI_CONTROL_BITCOUNT_MASK)
#define SPI_CONTROL_POL			(0x1 << 4)	/* Clock Polarity Mask */
#define SPI_CONTROL_POL_ACT_HIGH	(0x0 << 4)	/* Active high pol. (0=idle) */
#define SPI_CONTROL_POL_ACT_LOW		(0x1 << 4)	/* Active low pol. (1=idle) */
#define SPI_CONTROL_PHA			(0x1 << 5)	/* Clock Phase Mask */
#define SPI_CONTROL_PHA_0		(0x0 << 5)	/* Clock Phase 0 */
#define SPI_CONTROL_PHA_1		(0x1 << 5)	/* Clock Phase 1 */
#define SPI_CONTROL_SSCTL		(0x1 << 6)	/* /SS Waveform Select Mask */
#define SPI_CONTROL_SSCTL_0		(0x0 << 6)	/* Master: /SS stays low between SPI burst
							   Slave: RXFIFO advanced by BIT_COUNT */
#define SPI_CONTROL_SSCTL_1		(0x1 << 6)	/* Master: /SS insert pulse between SPI burst
							   Slave: RXFIFO advanced by /SS rising edge */
#define SPI_CONTROL_SSPOL		(0x1 << 7)	/* /SS Polarity Select Mask */
#define SPI_CONTROL_SSPOL_ACT_LOW	(0x0 << 7)	/* /SS Active low */
#define SPI_CONTROL_SSPOL_ACT_HIGH	(0x1 << 7)	/* /SS Active high */
#define SPI_CONTROL_XCH			(0x1 << 8)	/* Exchange */
#define SPI_CONTROL_SPIEN		(0x1 << 9)	/* SPI Module Enable */
#define SPI_CONTROL_MODE		(0x1 << 10)	/* SPI Mode Select Mask */
#define SPI_CONTROL_MODE_SLAVE		(0x0 << 10)	/* SPI Mode Slave */
#define SPI_CONTROL_MODE_MASTER		(0x1 << 10)	/* SPI Mode Master */
#define SPI_CONTROL_DRCTL		(0x3 << 11)	/* /SPI_RDY Control Mask */
#define SPI_CONTROL_DRCTL_0		(0x0 << 11)	/* Ignore /SPI_RDY */
#define SPI_CONTROL_DRCTL_1		(0x1 << 11)	/* /SPI_RDY falling edge triggers input */
#define SPI_CONTROL_DRCTL_2		(0x2 << 11)	/* /SPI_RDY active low level triggers input */
#define SPI_CONTROL_DATARATE		(0x7 << 13)	/* Data Rate Mask */
#define SPI_PERCLK2_DIV_MIN		(0)		/* PERCLK2:4 */
#define SPI_PERCLK2_DIV_MAX		(7)		/* PERCLK2:512 */
#define SPI_CONTROL_DATARATE_MIN	(SPI_PERCLK2_DIV_MAX << 13)
#define SPI_CONTROL_DATARATE_MAX	(SPI_PERCLK2_DIV_MIN << 13)
#define SPI_CONTROL_DATARATE_BAD	(SPI_CONTROL_DATARATE_MIN + 1)
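
/*
 * The 3-bit DATARATE field selects a PERCLK2 divider of 4 << field, i.e.
 * /4 (field 0, fastest) down to /512 (field 7, slowest).  For example,
 * with a hypothetical 48 MHz PERCLK2 the bit clock ranges from 12 MHz
 * down to 93.75 kHz.
 */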

/* SPI Interrupt/Status Register Bit Fields & Masks */
#define SPI_STATUS_TE	(0x1 << 0)	/* TXFIFO Empty Status */
#define SPI_STATUS_TH	(0x1 << 1)	/* TXFIFO Half Status */
#define SPI_STATUS_TF	(0x1 << 2)	/* TXFIFO Full Status */
#define SPI_STATUS_RR	(0x1 << 3)	/* RXFIFO Data Ready Status */
#define SPI_STATUS_RH	(0x1 << 4)	/* RXFIFO Half Status */
#define SPI_STATUS_RF	(0x1 << 5)	/* RXFIFO Full Status */
#define SPI_STATUS_RO	(0x1 << 6)	/* RXFIFO Overflow */
#define SPI_STATUS_BO	(0x1 << 7)	/* Bit Count Overflow */
#define SPI_STATUS	(0xFF)		/* SPI Status Mask */
#define SPI_INTEN_TE	(0x1 << 8)	/* TXFIFO Empty Interrupt Enable */
#define SPI_INTEN_TH	(0x1 << 9)	/* TXFIFO Half Interrupt Enable */
#define SPI_INTEN_TF	(0x1 << 10)	/* TXFIFO Full Interrupt Enable */
#define SPI_INTEN_RE	(0x1 << 11)	/* RXFIFO Data Ready Interrupt Enable */
#define SPI_INTEN_RH	(0x1 << 12)	/* RXFIFO Half Interrupt Enable */
#define SPI_INTEN_RF	(0x1 << 13)	/* RXFIFO Full Interrupt Enable */
#define SPI_INTEN_RO	(0x1 << 14)	/* RXFIFO Overflow Interrupt Enable */
#define SPI_INTEN_BO	(0x1 << 15)	/* Bit Count Overflow Interrupt Enable */
#define SPI_INTEN	(0xFF << 8)	/* SPI Interrupt Enable Mask */

/* SPI Test Register Bit Fields & Masks */
#define SPI_TEST_TXCNT		(0xF << 0)	/* TXFIFO Counter */
#define SPI_TEST_RXCNT_LSB	(4)		/* RXFIFO Counter LSB */
#define SPI_TEST_RXCNT		(0xF << 4)	/* RXFIFO Counter */
#define SPI_TEST_SSTATUS	(0xF << 8)	/* State Machine Status */
#define SPI_TEST_LBC		(0x1 << 14)	/* Loop Back Control */

/* SPI Period Register Bit Fields & Masks */
#define SPI_PERIOD_WAIT		(0x7FFF << 0)	/* Wait Between Transactions */
#define SPI_PERIOD_MAX_WAIT	(0x7FFF)	/* Max Wait Between
						   Transactions */
#define SPI_PERIOD_CSRC		(0x1 << 15)	/* Period Clock Source Mask */
#define SPI_PERIOD_CSRC_BCLK	(0x0 << 15)	/* Period Clock Source is
						   Bit Clock */
#define SPI_PERIOD_CSRC_32768	(0x1 << 15)	/* Period Clock Source is
						   32.768 kHz Clock */

/* SPI DMA Register Bit Fields & Masks */
#define SPI_DMA_RHDMA	(0x1 << 4)	/* RXFIFO Half Status */
#define SPI_DMA_RFDMA	(0x1 << 5)	/* RXFIFO Full Status */
#define SPI_DMA_TEDMA	(0x1 << 6)	/* TXFIFO Empty Status */
#define SPI_DMA_THDMA	(0x1 << 7)	/* TXFIFO Half Status */
#define SPI_DMA_RHDEN	(0x1 << 12)	/* RXFIFO Half DMA Request Enable */
#define SPI_DMA_RFDEN	(0x1 << 13)	/* RXFIFO Full DMA Request Enable */
#define SPI_DMA_TEDEN	(0x1 << 14)	/* TXFIFO Empty DMA Request Enable */
#define SPI_DMA_THDEN	(0x1 << 15)	/* TXFIFO Half DMA Request Enable */

/* SPI Soft Reset Register Bit Fields & Masks */
#define SPI_RESET_START	(0x1)		/* Start */

/* Default SPI configuration values */
#define SPI_DEFAULT_CONTROL		\
(					\
	SPI_CONTROL_BITCOUNT(16) |	\
	SPI_CONTROL_POL_ACT_HIGH |	\
	SPI_CONTROL_PHA_0 |		\
	SPI_CONTROL_SPIEN |		\
	SPI_CONTROL_SSCTL_1 |		\
	SPI_CONTROL_MODE_MASTER |	\
	SPI_CONTROL_DRCTL_0 |		\
	SPI_CONTROL_DATARATE_MIN	\
)
#define SPI_DEFAULT_ENABLE_LOOPBACK	(0)
#define SPI_DEFAULT_ENABLE_DMA		(0)
#define SPI_DEFAULT_PERIOD_WAIT		(8)
/*-------------------------------------------------------------------------*/


/*-------------------------------------------------------------------------*/
/* TX/RX SPI FIFO size */
#define SPI_FIFO_DEPTH			(8)
#define SPI_FIFO_BYTE_WIDTH		(2)
#define SPI_FIFO_OVERFLOW_MARGIN	(2)

/* DMA burst length for half full/empty request trigger */
#define SPI_DMA_BLR			(SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2)
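
/*
 * With the 8-entry FIFO of 16-bit words above, this works out to an 8-byte
 * DMA burst, i.e. four words moved per half-full/half-empty request.
 */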

/* Dummy char output to achieve reads.
   Choosing something different from all zeroes may help pattern recognition
   for oscilloscope analysis, but may break some drivers. */
#define SPI_DUMMY_u8	0
#define SPI_DUMMY_u16	((SPI_DUMMY_u8 << 8) | SPI_DUMMY_u8)
#define SPI_DUMMY_u32	((SPI_DUMMY_u16 << 16) | SPI_DUMMY_u16)

/**
 * Macro to change a u32 field:
 * @r : register to edit
 * @m : bit mask
 * @v : new value for the field correctly bit-aligned
 */
#define u32_EDIT(r, m, v)	r = (r & ~(m)) | (v)
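
/*
 * Example (illustrative only): switch an already-configured control word
 * to slave mode without disturbing the other fields:
 *
 *	u32_EDIT(control, SPI_CONTROL_MODE, SPI_CONTROL_MODE_SLAVE);
 */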

/* Message state */
#define START_STATE	((void*)0)
#define RUNNING_STATE	((void*)1)
#define DONE_STATE	((void*)2)
#define ERROR_STATE	((void*)-1)

/* Queue state */
#define QUEUE_RUNNING	(0)
#define QUEUE_STOPPED	(1)

#define IS_DMA_ALIGNED(x)	(((u32)(x) & 0x03) == 0)
#define DMA_ALIGNMENT		4
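
/* Buffers handed to the i.MX DMA engine must start on a 32-bit boundary;
   DMA_ALIGNMENT is also advertised to the SPI core through
   master->dma_alignment (see spi_imx_probe() below). */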
/*-------------------------------------------------------------------------*/


/*-------------------------------------------------------------------------*/
/* Driver data structs */

/* Context */
struct driver_data {
	/* Driver model hookup */
	struct platform_device *pdev;

	/* SPI framework hookup */
	struct spi_master *master;

	/* IMX hookup */
	struct spi_imx_master *master_info;

	/* Memory resources and SPI regs virtual address */
	struct resource *ioarea;
	void __iomem *regs;

	/* SPI RX_DATA physical address */
	dma_addr_t rd_data_phys;

	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct work;
	spinlock_t lock;
	struct list_head queue;
	int busy;
	int run;

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message, transfer and state */
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;

	/* Rd / Wr buffers pointers */
	size_t len;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;

	u8 rd_only;
	u8 n_bytes;
	int cs_change;

	/* Function pointers */
	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
	void (*cs_control)(u32 command);

	/* DMA setup */
	int rx_channel;
	int tx_channel;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	int rx_dma_needs_unmap;
	int tx_dma_needs_unmap;
	size_t tx_map_len;
	u32 dummy_dma_buf ____cacheline_aligned;

	struct clk *clk;
};

/* Runtime state */
struct chip_data {
	u32 control;
	u32 period;
	u32 test;

	u8 enable_dma:1;
	u8 bits_per_word;
	u8 n_bytes;
	u32 max_speed_hz;

	void (*cs_control)(u32 command);
};
/*-------------------------------------------------------------------------*/


static void pump_messages(struct work_struct *work);

static void flush(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	u32 control;

	dev_dbg(&drv_data->pdev->dev, "flush\n");

	/* Wait for end of transaction */
	do {
		control = readl(regs + SPI_CONTROL);
	} while (control & SPI_CONTROL_XCH);

	/* Release chip select if requested, transfer delays are
	   handled in pump_transfers */
	if (drv_data->cs_change)
		drv_data->cs_control(SPI_CS_DEASSERT);

	/* Disable SPI to flush FIFOs */
	writel(control & ~SPI_CONTROL_SPIEN, regs + SPI_CONTROL);
	writel(control, regs + SPI_CONTROL);
}

static void restore_state(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	struct chip_data *chip = drv_data->cur_chip;

	/* Load chip registers */
	dev_dbg(&drv_data->pdev->dev,
		"restore_state\n"
		"    test    = 0x%08X\n"
		"    control = 0x%08X\n",
		chip->test,
		chip->control);
	writel(chip->test, regs + SPI_TEST);
	writel(chip->period, regs + SPI_PERIOD);
	writel(0, regs + SPI_INT_STATUS);
	writel(chip->control, regs + SPI_CONTROL);
}

static void null_cs_control(u32 command)
{
}

static inline u32 data_to_write(struct driver_data *drv_data)
{
	return ((u32)(drv_data->tx_end - drv_data->tx)) / drv_data->n_bytes;
}

static inline u32 data_to_read(struct driver_data *drv_data)
{
	return ((u32)(drv_data->rx_end - drv_data->rx)) / drv_data->n_bytes;
}

static int write(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	void *tx = drv_data->tx;
	void *tx_end = drv_data->tx_end;
	u8 n_bytes = drv_data->n_bytes;
	u32 remaining_writes;
	u32 fifo_avail_space;
	u32 n;
	u16 d;

	/* Compute how many fifo writes to do */
	remaining_writes = (u32)(tx_end - tx) / n_bytes;
	fifo_avail_space = SPI_FIFO_DEPTH -
				(readl(regs + SPI_TEST) & SPI_TEST_TXCNT);
	if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN))
		/* Leave some margin in the TXFIFO so the RXFIFO cannot
		   overflow before the reader catches up */
		fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN;
	n = min(remaining_writes, fifo_avail_space);

	dev_dbg(&drv_data->pdev->dev,
		"write type %s\n"
		"    remaining writes = %d\n"
		"    fifo avail space = %d\n"
		"    fifo writes      = %d\n",
		(n_bytes == 1) ? "u8" : "u16",
		remaining_writes,
		fifo_avail_space,
		n);

	if (n > 0) {
		/* Fill SPI TXFIFO */
		if (drv_data->rd_only) {
			tx += n * n_bytes;
			while (n--)
				writel(SPI_DUMMY_u16, regs + SPI_TXDATA);
		} else {
			if (n_bytes == 1) {
				while (n--) {
					d = *(u8*)tx;
					writel(d, regs + SPI_TXDATA);
					tx += 1;
				}
			} else {
				while (n--) {
					d = *(u16*)tx;
					writel(d, regs + SPI_TXDATA);
					tx += 2;
				}
			}
		}

		/* Trigger transfer */
		writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
			regs + SPI_CONTROL);

		/* Update tx pointer */
		drv_data->tx = tx;
	}

	return (tx >= tx_end);
}

static int read(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	void *rx = drv_data->rx;
	void *rx_end = drv_data->rx_end;
	u8 n_bytes = drv_data->n_bytes;
	u32 remaining_reads;
	u32 fifo_rxcnt;
	u32 n;
	u16 d;

	/* Compute how many fifo reads to do */
	remaining_reads = (u32)(rx_end - rx) / n_bytes;
	fifo_rxcnt = (readl(regs + SPI_TEST) & SPI_TEST_RXCNT) >>
			SPI_TEST_RXCNT_LSB;
	n = min(remaining_reads, fifo_rxcnt);

	dev_dbg(&drv_data->pdev->dev,
		"read type %s\n"
		"    remaining reads = %d\n"
		"    fifo rx count   = %d\n"
		"    fifo reads      = %d\n",
		(n_bytes == 1) ? "u8" : "u16",
		remaining_reads,
		fifo_rxcnt,
		n);

	if (n > 0) {
		/* Read SPI RXFIFO */
		if (n_bytes == 1) {
			while (n--) {
				d = readl(regs + SPI_RXDATA);
				*((u8*)rx) = d;
				rx += 1;
			}
		} else {
			while (n--) {
				d = readl(regs + SPI_RXDATA);
				*((u16*)rx) = d;
				rx += 2;
			}
		}

		/* Update rx pointer */
		drv_data->rx = rx;
	}

	return (rx >= rx_end);
}

static void *next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	}

	return DONE_STATE;
}
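
/*
 * Map message buffers for DMA.  Three cases are handled below:
 *   1. msg->is_dma_mapped with tx_dma valid: use the caller's mappings.
 *   2. msg->is_dma_mapped with only rx_dma valid: map dummy_dma_buf so
 *      dummy TX writes can clock the read in.
 *   3. Unmapped buffers: map tx (or dummy_dma_buf) and, if present, rx.
 * Returns 0 on success, -1 to make pump_transfers() fall back to PIO.
 */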
static int map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg;
	struct device *dev;
	void *buf;

	drv_data->rx_dma_needs_unmap = 0;
	drv_data->tx_dma_needs_unmap = 0;

	if (!drv_data->master_info->enable_dma ||
		!drv_data->cur_chip->enable_dma)
			return -1;

	msg = drv_data->cur_msg;
	dev = &msg->spi->dev;
	if (msg->is_dma_mapped) {
		if (drv_data->tx_dma)
			/* The caller provided at least dma and cpu virtual
			   address for write; pump_transfers() will consider
			   the transfer as write only if cpu rx virtual
			   address is NULL */
			return 0;

		if (drv_data->rx_dma) {
			/* The caller provided dma and cpu virtual address to
			   perform a read only transfer -->
			   use drv_data->dummy_dma_buf for dummy writes to
			   achieve reads */
			buf = &drv_data->dummy_dma_buf;
			drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
			drv_data->tx_dma = dma_map_single(dev,
							buf,
							drv_data->tx_map_len,
							DMA_TO_DEVICE);
			if (dma_mapping_error(dev, drv_data->tx_dma))
				return -1;
			drv_data->tx_dma_needs_unmap = 1;

			/* Flags transfer as rd_only for pump_transfers() DMA
			   regs programming (should be redundant) */
			drv_data->tx = NULL;

			return 0;
		}
	}

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return -1;

	if (drv_data->tx == NULL) {
		/* Read only message --> use drv_data->dummy_dma_buf for dummy
		   writes to achieve reads */
		buf = &drv_data->dummy_dma_buf;
		drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
	} else {
		buf = drv_data->tx;
		drv_data->tx_map_len = drv_data->len;
	}
	drv_data->tx_dma = dma_map_single(dev,
					buf,
					drv_data->tx_map_len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return -1;
	drv_data->tx_dma_needs_unmap = 1;

	/* NULL rx means write-only transfer and no map needed
	 * since rx DMA will not be used */
	if (drv_data->rx) {
		buf = drv_data->rx;
		drv_data->rx_dma = dma_map_single(dev,
						buf,
						drv_data->len,
						DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, drv_data->rx_dma)) {
			if (drv_data->tx_dma) {
				dma_unmap_single(dev,
						drv_data->tx_dma,
						drv_data->tx_map_len,
						DMA_TO_DEVICE);
				drv_data->tx_dma_needs_unmap = 0;
			}
			return -1;
		}
		drv_data->rx_dma_needs_unmap = 1;
	}

	return 0;
}

static void unmap_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (drv_data->rx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->rx_dma,
				drv_data->len,
				DMA_FROM_DEVICE);
		drv_data->rx_dma_needs_unmap = 0;
	}
	if (drv_data->tx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->tx_dma,
				drv_data->tx_map_len,
				DMA_TO_DEVICE);
		drv_data->tx_dma_needs_unmap = 0;
	}
}

/* Caller already set message->status (dma is already blocked) */
static void giveback(struct spi_message *message, struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;

	/* Bring SPI to sleep; restore_state() and pump_transfer()
	   will do new setup */
	writel(0, regs + SPI_INT_STATUS);
	writel(0, regs + SPI_DMA);

	/* Unconditional deselect */
	drv_data->cs_control(SPI_CS_DEASSERT);

	message->state = NULL;
	if (message->complete)
		message->complete(message->context);

	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	queue_work(drv_data->workqueue, &drv_data->work);
}

static void dma_err_handler(int channel, void *data, int errcode)
{
	struct driver_data *drv_data = data;
	struct spi_message *msg = drv_data->cur_msg;

	dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n");

	/* Disable both rx and tx dma channels */
	imx_dma_disable(drv_data->rx_channel);
	imx_dma_disable(drv_data->tx_channel);
	unmap_dma_buffers(drv_data);
	flush(drv_data);
	msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_tx_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;

	dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n");

	imx_dma_disable(channel);

	/* Now wait for TXFIFO empty */
	writel(SPI_INTEN_TE, drv_data->regs + SPI_INT_STATUS);
}

static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
	u32 status;
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;

	status = readl(regs + SPI_INT_STATUS);

	if ((status & (SPI_INTEN_RO | SPI_STATUS_RO))
			== (SPI_INTEN_RO | SPI_STATUS_RO)) {
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);

		imx_dma_disable(drv_data->tx_channel);
		imx_dma_disable(drv_data->rx_channel);
		unmap_dma_buffers(drv_data);

		flush(drv_data);

		dev_warn(&drv_data->pdev->dev,
			"dma_transfer - fifo overrun\n");

		msg->state = ERROR_STATE;
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	}

	if (status & SPI_STATUS_TE) {
		writel(status & ~SPI_INTEN_TE, regs + SPI_INT_STATUS);

		if (drv_data->rx) {
			/* Wait for end of transfer before reading
			   trailing data */
			while (readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH)
				cpu_relax();

			imx_dma_disable(drv_data->rx_channel);
			unmap_dma_buffers(drv_data);

			/* Release chip select if requested, transfer delays
			   are handled in pump_transfers() */
			if (drv_data->cs_change)
				drv_data->cs_control(SPI_CS_DEASSERT);

			/* Compute how many words are still in the RXFIFO
			   and read them out */
			dev_dbg(&drv_data->pdev->dev,
				"dma_transfer - test = 0x%08X\n",
				readl(regs + SPI_TEST));
			drv_data->rx = drv_data->rx_end -
					((readl(regs + SPI_TEST) &
					SPI_TEST_RXCNT) >>
					SPI_TEST_RXCNT_LSB)*drv_data->n_bytes;
			read(drv_data);
		} else {
			/* Write only transfer */
			unmap_dma_buffers(drv_data);

			flush(drv_data);
		}

		/* End of transfer, update total bytes transferred */
		msg->actual_length += drv_data->len;

		/* Move to next transfer */
		msg->state = next_transfer(drv_data);

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;
	u32 status;
	irqreturn_t handled = IRQ_NONE;

	status = readl(regs + SPI_INT_STATUS);

	if (status & SPI_INTEN_TE) {
		/* TXFIFO Empty Interrupt on the last transferred word */
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
		dev_dbg(&drv_data->pdev->dev,
			"interrupt_wronly_transfer - end of tx\n");

		flush(drv_data);

		/* Update total bytes transferred */
		msg->actual_length += drv_data->len;

		/* Move to next transfer */
		msg->state = next_transfer(drv_data);

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	} else {
		while (status & SPI_STATUS_TH) {
			dev_dbg(&drv_data->pdev->dev,
				"interrupt_wronly_transfer - status = 0x%08X\n",
				status);

			/* Pump data */
			if (write(drv_data)) {
				/* End of TXFIFO writes,
				   now wait until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				return IRQ_HANDLED;
			}

			status = readl(regs + SPI_INT_STATUS);

			/* We did something */
			handled = IRQ_HANDLED;
		}
	}

	return handled;
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;
	u32 status, control;
	irqreturn_t handled = IRQ_NONE;
	unsigned long limit;

	status = readl(regs + SPI_INT_STATUS);

	if (status & SPI_INTEN_TE) {
		/* TXFIFO Empty Interrupt on the last transferred word */
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
		dev_dbg(&drv_data->pdev->dev,
			"interrupt_transfer - end of tx\n");

		if (msg->state == ERROR_STATE) {
			/* RXFIFO overrun was detected and message aborted */
			flush(drv_data);
		} else {
			/* Wait for end of transaction */
			do {
				control = readl(regs + SPI_CONTROL);
			} while (control & SPI_CONTROL_XCH);

			/* Release chip select if requested, transfer delays
			   are handled in pump_transfers */
			if (drv_data->cs_change)
				drv_data->cs_control(SPI_CS_DEASSERT);

			/* Read trailing bytes */
			limit = loops_per_jiffy << 1;
			while ((read(drv_data) == 0) && --limit)
				cpu_relax();

			if (limit == 0)
				dev_err(&drv_data->pdev->dev,
					"interrupt_transfer - "
					"trailing byte read failed\n");
			else
				dev_dbg(&drv_data->pdev->dev,
					"interrupt_transfer - end of rx\n");

			/* Update total bytes transferred */
			msg->actual_length += drv_data->len;

			/* Move to next transfer */
			msg->state = next_transfer(drv_data);
		}

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	} else {
		while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
			dev_dbg(&drv_data->pdev->dev,
				"interrupt_transfer - status = 0x%08X\n",
				status);

			if (status & SPI_STATUS_RO) {
				/* RXFIFO overrun, abort message and wait
				   until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				dev_warn(&drv_data->pdev->dev,
					"interrupt_transfer - fifo overrun\n"
					"    data not yet written = %d\n"
					"    data not yet read    = %d\n",
					data_to_write(drv_data),
					data_to_read(drv_data));
				msg->state = ERROR_STATE;
				return IRQ_HANDLED;
			}

			/* Pump data */
			read(drv_data);
			if (write(drv_data)) {
				/* End of TXFIFO writes,
				   now wait until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				return IRQ_HANDLED;
			}

			status = readl(regs + SPI_INT_STATUS);

			/* We did something */
			handled = IRQ_HANDLED;
		}
	}

	return handled;
}

static irqreturn_t spi_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = (struct driver_data *)dev_id;

	if (!drv_data->cur_msg) {
		dev_err(&drv_data->pdev->dev,
			"spi_int - bad message state\n");
		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

static inline u32 spi_speed_hz(struct driver_data *drv_data, u32 data_rate)
{
	return clk_get_rate(drv_data->clk) / (4 << ((data_rate) >> 13));
}

static u32 spi_data_rate(struct driver_data *drv_data, u32 speed_hz)
{
	u32 div;
	u32 quantized_hz = clk_get_rate(drv_data->clk) >> 2;

	for (div = SPI_PERCLK2_DIV_MIN;
		div <= SPI_PERCLK2_DIV_MAX;
		div++, quantized_hz >>= 1) {
			if (quantized_hz <= speed_hz)
				/* Max available speed <= requested speed */
				return div << 13;
	}
	return SPI_CONTROL_DATARATE_BAD;
}
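
/*
 * Worked example (assuming a hypothetical 96 MHz perclk2): a request for
 * 1 MHz scans 24, 12, 6, 3, 1.5 and 0.75 MHz; divider field 5 gives the
 * first rate not above the request, so the transfer actually runs at
 * 750 kHz.  Requests below 187.5 kHz (96 MHz / 512) cannot be met and
 * yield SPI_CONTROL_DATARATE_BAD.
 */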
static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message;
	struct spi_transfer *transfer, *previous;
	struct chip_data *chip;
	void __iomem *regs;
	u32 tmp, control;

	dev_dbg(&drv_data->pdev->dev, "pump_transfer\n");

	message = drv_data->cur_msg;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(message, drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(message, drv_data);
		return;
	}

	chip = drv_data->cur_chip;

	/* Delay if requested at end of transfer */
	transfer = drv_data->cur_transfer;
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	} else {
		/* START_STATE */
		message->state = RUNNING_STATE;
		drv_data->cs_control = chip->cs_control;
	}

	transfer = drv_data->cur_transfer;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len;
	drv_data->cs_change = transfer->cs_change;
	drv_data->rd_only = (drv_data->tx == NULL);

	regs = drv_data->regs;
	control = readl(regs + SPI_CONTROL);

	/* Bits per word setup */
	tmp = transfer->bits_per_word;
	if (tmp == 0) {
		/* Use device setup */
		tmp = chip->bits_per_word;
		drv_data->n_bytes = chip->n_bytes;
	} else
		/* Use per-transfer setup */
		drv_data->n_bytes = (tmp <= 8) ? 1 : 2;
	u32_EDIT(control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);

	/* Speed setup (surely valid because already checked) */
	tmp = transfer->speed_hz;
	if (tmp == 0)
		tmp = chip->max_speed_hz;
	tmp = spi_data_rate(drv_data, tmp);
	u32_EDIT(control, SPI_CONTROL_DATARATE, tmp);

	writel(control, regs + SPI_CONTROL);

	/* Assert device chip-select */
	drv_data->cs_control(SPI_CS_ASSERT);

	/* DMA can read/write the SPI FIFOs only 16 bits at a time, so PIO
	   is used whenever bits_per_word is less than or equal to 8.
	   Moreover, DMA is only convenient when the transfer is larger
	   than the FIFO size in bytes. */
	if ((drv_data->n_bytes == 2) &&
		(drv_data->len > SPI_FIFO_DEPTH*SPI_FIFO_BYTE_WIDTH) &&
		(map_dma_buffers(drv_data) == 0)) {
		dev_dbg(&drv_data->pdev->dev,
			"pump dma transfer\n"
			"    tx      = %p\n"
			"    tx_dma  = %08X\n"
			"    rx      = %p\n"
			"    rx_dma  = %08X\n"
			"    len     = %d\n",
			drv_data->tx,
			(unsigned int)drv_data->tx_dma,
			drv_data->rx,
			(unsigned int)drv_data->rx_dma,
			drv_data->len);

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = dma_transfer;

		/* Trigger transfer */
		writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
			regs + SPI_CONTROL);

		/* Setup tx DMA */
		if (drv_data->tx)
			/* Linear source address */
			CCR(drv_data->tx_channel) =
				CCR_DMOD_FIFO |
				CCR_SMOD_LINEAR |
				CCR_SSIZ_32 | CCR_DSIZ_16 |
				CCR_REN;
		else
			/* Read only transfer -> fixed source address for
			   dummy write to achieve read */
			CCR(drv_data->tx_channel) =
				CCR_DMOD_FIFO |
				CCR_SMOD_FIFO |
				CCR_SSIZ_32 | CCR_DSIZ_16 |
				CCR_REN;

		imx_dma_setup_single(
			drv_data->tx_channel,
			drv_data->tx_dma,
			drv_data->len,
			drv_data->rd_data_phys + 4,
			DMA_MODE_WRITE);

		if (drv_data->rx) {
			/* Setup rx DMA for linear destination address */
			CCR(drv_data->rx_channel) =
				CCR_DMOD_LINEAR |
				CCR_SMOD_FIFO |
				CCR_DSIZ_32 | CCR_SSIZ_16 |
				CCR_REN;
			imx_dma_setup_single(
				drv_data->rx_channel,
				drv_data->rx_dma,
				drv_data->len,
				drv_data->rd_data_phys,
				DMA_MODE_READ);
			imx_dma_enable(drv_data->rx_channel);

			/* Enable SPI interrupt */
			writel(SPI_INTEN_RO, regs + SPI_INT_STATUS);

			/* Set SPI to request DMA service on both
			   Rx and Tx half fifo watermark */
			writel(SPI_DMA_RHDEN | SPI_DMA_THDEN, regs + SPI_DMA);
		} else
			/* Write only access -> set SPI to request DMA
			   service on Tx half fifo watermark */
			writel(SPI_DMA_THDEN, regs + SPI_DMA);

		imx_dma_enable(drv_data->tx_channel);
	} else {
		dev_dbg(&drv_data->pdev->dev,
			"pump pio transfer\n"
			"    tx = %p\n"
			"    rx = %p\n"
			"    len = %d\n",
			drv_data->tx,
			drv_data->rx,
			drv_data->len);

		/* Ensure we have the correct interrupt handler */
		if (drv_data->rx)
			drv_data->transfer_handler = interrupt_transfer;
		else
			drv_data->transfer_handler = interrupt_wronly_transfer;

		/* Enable SPI interrupt */
		if (drv_data->rx)
			writel(SPI_INTEN_TH | SPI_INTEN_RO,
				regs + SPI_INT_STATUS);
		else
			writel(SPI_INTEN_TH, regs + SPI_INT_STATUS);
	}
}

static void pump_messages(struct work_struct *work)
{
	struct driver_data *drv_data =
				container_of(work, struct driver_data, work);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
		drv_data->busy = 0;
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Extract head of queue */
	drv_data->cur_msg = list_entry(drv_data->queue.next,
					struct spi_message, queue);
	list_del_init(&drv_data->cur_msg->queue);
	drv_data->busy = 1;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* Setup the SPI using the per chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
	restore_state(drv_data);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
}

static int transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	u32 min_speed_hz, max_speed_hz, tmp;
	struct spi_transfer *trans;
	unsigned long flags;

	msg->actual_length = 0;

	/* Per transfer setup check */
	min_speed_hz = spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN);
	max_speed_hz = spi->max_speed_hz;
	list_for_each_entry(trans, &msg->transfers, transfer_list) {
		tmp = trans->bits_per_word;
		if (tmp > 16) {
			dev_err(&drv_data->pdev->dev,
				"message rejected : "
				"invalid transfer bits_per_word (%d bits)\n",
				tmp);
			goto msg_rejected;
		}
		tmp = trans->speed_hz;
		if (tmp) {
			if (tmp < min_speed_hz) {
				dev_err(&drv_data->pdev->dev,
					"message rejected : "
					"device min speed (%d Hz) exceeds "
					"required transfer speed (%d Hz)\n",
					min_speed_hz,
					tmp);
				goto msg_rejected;
			} else if (tmp > max_speed_hz) {
				dev_err(&drv_data->pdev->dev,
					"message rejected : "
					"transfer speed (%d Hz) exceeds "
					"device max speed (%d Hz)\n",
					tmp,
					max_speed_hz);
				goto msg_rejected;
			}
		}
	}

	/* Message accepted */
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	spin_lock_irqsave(&drv_data->lock, flags);
	if (drv_data->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -ESHUTDOWN;
	}

	list_add_tail(&msg->queue, &drv_data->queue);
	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->work);
	spin_unlock_irqrestore(&drv_data->lock, flags);

	return 0;

msg_rejected:
	/* Message rejected and not queued */
	msg->status = -EINVAL;
	msg->state = ERROR_STATE;
	if (msg->complete)
		msg->complete(msg->context);
	return -EINVAL;
}
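
/*
 * Illustrative only: a protocol driver reaches transfer() above through
 * the generic SPI core.  A hypothetical 16-bit full-duplex exchange of
 * caller-supplied tx_buf/rx_buf/len might look like:
 *
 *	struct spi_transfer t = {
 *		.tx_buf		= tx_buf,
 *		.rx_buf		= rx_buf,
 *		.len		= len,
 *		.bits_per_word	= 16,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	status = spi_async(spi, &m);	 (or spi_sync(spi, &m))
 */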

/* On first setup bad values must free chip_data memory since they will
   cause spi_new_device to fail.  Bad values set later by the protocol
   driver are simply not applied and the calling driver is notified. */
static int setup(struct spi_device *spi)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	struct spi_imx_chip *chip_info;
	struct chip_data *chip;
	int first_setup = 0;
	u32 tmp;
	int status = 0;

	/* Get controller data */
	chip_info = spi->controller_data;

	/* Get controller_state */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		first_setup = 1;

		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"setup - cannot allocate controller state\n");
			return -ENOMEM;
		}
		chip->control = SPI_DEFAULT_CONTROL;

		if (chip_info == NULL) {
			/* spi_board_info.controller_data is not supplied */
			chip_info = kzalloc(sizeof(struct spi_imx_chip),
						GFP_KERNEL);
			if (!chip_info) {
				dev_err(&spi->dev,
					"setup - "
					"cannot allocate controller data\n");
				status = -ENOMEM;
				goto err_first_setup;
			}

			/* Set controller data default value */
			chip_info->enable_loopback =
						SPI_DEFAULT_ENABLE_LOOPBACK;
			chip_info->enable_dma = SPI_DEFAULT_ENABLE_DMA;
			chip_info->ins_ss_pulse = 1;
			chip_info->bclk_wait = SPI_DEFAULT_PERIOD_WAIT;
			chip_info->cs_control = null_cs_control;
		}
	}

	/* Now set controller state based on controller data */

	if (first_setup) {
		/* SPI loopback */
		if (chip_info->enable_loopback)
			chip->test = SPI_TEST_LBC;
		else
			chip->test = 0;

		/* SPI dma driven */
		chip->enable_dma = chip_info->enable_dma;

		/* SPI /SS pulse between spi burst */
		if (chip_info->ins_ss_pulse)
			u32_EDIT(chip->control,
				SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_1);
		else
			u32_EDIT(chip->control,
				SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_0);

		/* SPI bclk waits between each bits_per_word spi burst */
		if (chip_info->bclk_wait > SPI_PERIOD_MAX_WAIT) {
			dev_err(&spi->dev,
				"setup - "
				"bclk_wait exceeds max allowed (%d)\n",
				SPI_PERIOD_MAX_WAIT);
			status = -EINVAL;
			goto err_first_setup;
		}
		chip->period = SPI_PERIOD_CSRC_BCLK |
				(chip_info->bclk_wait & SPI_PERIOD_WAIT);
	}

	/* SPI mode */
	tmp = spi->mode;
	if (tmp & SPI_CS_HIGH) {
		u32_EDIT(chip->control,
				SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
	}
	switch (tmp & SPI_MODE_3) {
	case SPI_MODE_0:
		tmp = 0;
		break;
	case SPI_MODE_1:
		tmp = SPI_CONTROL_PHA_1;
		break;
	case SPI_MODE_2:
		tmp = SPI_CONTROL_POL_ACT_LOW;
		break;
	default:
		/* SPI_MODE_3 */
		tmp = SPI_CONTROL_PHA_1 | SPI_CONTROL_POL_ACT_LOW;
		break;
	}
	u32_EDIT(chip->control, SPI_CONTROL_POL | SPI_CONTROL_PHA, tmp);

	/* SPI word width */
	tmp = spi->bits_per_word;
	if (tmp > 16) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"setup - "
			"invalid bits_per_word (%d)\n",
			tmp);
		if (first_setup)
			goto err_first_setup;
		else {
			/* Undo setup using chip as backup copy */
			tmp = chip->bits_per_word;
			spi->bits_per_word = tmp;
		}
	}
	chip->bits_per_word = tmp;
	u32_EDIT(chip->control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
	chip->n_bytes = (tmp <= 8) ? 1 : 2;

	/* SPI datarate */
	tmp = spi_data_rate(drv_data, spi->max_speed_hz);
	if (tmp == SPI_CONTROL_DATARATE_BAD) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"setup - "
			"HW min speed (%d Hz) exceeds required "
			"max speed (%d Hz)\n",
			spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
			spi->max_speed_hz);
		if (first_setup)
			goto err_first_setup;
		else
			/* Undo setup using chip as backup copy */
			spi->max_speed_hz = chip->max_speed_hz;
	} else {
		u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp);
		/* Actual rounded max_speed_hz */
		tmp = spi_speed_hz(drv_data, tmp);
		spi->max_speed_hz = tmp;
		chip->max_speed_hz = tmp;
	}

	/* SPI chip-select management; on repeated setup calls chip_info may
	   be NULL, in which case the previous cs_control is kept */
	if (chip_info) {
		if (chip_info->cs_control)
			chip->cs_control = chip_info->cs_control;
		else
			chip->cs_control = null_cs_control;
	}

	/* Save controller_state */
	spi_set_ctldata(spi, chip);

	/* Summary */
	dev_dbg(&spi->dev,
		"setup succeeded\n"
		"    loopback enable   = %s\n"
		"    dma enable        = %s\n"
		"    insert /ss pulse  = %s\n"
		"    period wait       = %d\n"
		"    mode              = %d\n"
		"    bits per word     = %d\n"
		"    min speed         = %d Hz\n"
		"    rounded max speed = %d Hz\n",
		chip->test & SPI_TEST_LBC ? "Yes" : "No",
		chip->enable_dma ? "Yes" : "No",
		chip->control & SPI_CONTROL_SSCTL ? "Yes" : "No",
		chip->period & SPI_PERIOD_WAIT,
		spi->mode,
		spi->bits_per_word,
		spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
		spi->max_speed_hz);
	return status;

err_first_setup:
	kfree(chip);
	return status;
}

static void cleanup(struct spi_device *spi)
{
	kfree(spi_get_ctldata(spi));
}
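
/*
 * Message queue lifecycle: init_queue() creates the workqueue and tasklet,
 * start_queue() lets pump_messages() run, stop_queue() drains with a
 * bounded poll, and destroy_queue() tears everything down.  pump_messages()
 * (workqueue) pops one spi_message, pump_transfers() (tasklet) walks its
 * transfers, and DONE_STATE or ERROR_STATE hands it back via giveback().
 */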
static int __init init_queue(struct driver_data *drv_data)
{
	INIT_LIST_HEAD(&drv_data->queue);
	spin_lock_init(&drv_data->lock);

	drv_data->run = QUEUE_STOPPED;
	drv_data->busy = 0;

	tasklet_init(&drv_data->pump_transfers,
			pump_transfers, (unsigned long)drv_data);

	INIT_WORK(&drv_data->work, pump_messages);
	drv_data->workqueue = create_singlethread_workqueue(
				dev_name(drv_data->master->dev.parent));
	if (drv_data->workqueue == NULL)
		return -EBUSY;

	return 0;
}

static int start_queue(struct driver_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -EBUSY;
	}

	drv_data->run = QUEUE_RUNNING;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	queue_work(drv_data->workqueue, &drv_data->work);

	return 0;
}

static int stop_queue(struct driver_data *drv_data)
{
	unsigned long flags;
	unsigned limit = 500;
	int status = 0;

	spin_lock_irqsave(&drv_data->lock, flags);

	/* This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the drv_data->busy could be used, but then the
	 * common execution path (pump_messages) would be required to call
	 * wake_up or friends on every SPI message. Do this instead */
	drv_data->run = QUEUE_STOPPED;
	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		msleep(10);
		spin_lock_irqsave(&drv_data->lock, flags);
	}

	if (!list_empty(&drv_data->queue) || drv_data->busy)
		status = -EBUSY;

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return status;
}

static int destroy_queue(struct driver_data *drv_data)
{
	int status;

	status = stop_queue(drv_data);
	if (status != 0)
		return status;

	if (drv_data->workqueue)
		destroy_workqueue(drv_data->workqueue);

	return 0;
}

static int __init spi_imx_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_imx_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct resource *res;
	int irq, status = 0;

	platform_info = dev->platform_data;
	if (platform_info == NULL) {
		dev_err(&pdev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for drv_data */
	master = spi_alloc_master(dev, sizeof(struct driver_data));
	if (!master) {
		dev_err(&pdev->dev, "probe - cannot alloc spi_master\n");
		status = -ENOMEM;
		goto err_no_mem;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->bus_num = pdev->id;
	master->num_chipselect = platform_info->num_chipselect;
	master->dma_alignment = DMA_ALIGNMENT;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer = transfer;

	drv_data->dummy_dma_buf = SPI_DUMMY_u32;

	drv_data->clk = clk_get(&pdev->dev, "perclk2");
	if (IS_ERR(drv_data->clk)) {
		dev_err(&pdev->dev, "probe - cannot get clock\n");
		status = PTR_ERR(drv_data->clk);
		goto err_no_clk;
	}
	clk_enable(drv_data->clk);

	/* Find and map resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "probe - MEM resources not defined\n");
		status = -ENODEV;
		goto err_no_iores;
	}
	drv_data->ioarea = request_mem_region(res->start,
						res->end - res->start + 1,
						pdev->name);
	if (drv_data->ioarea == NULL) {
		dev_err(&pdev->dev, "probe - cannot reserve region\n");
		status = -ENXIO;
		goto err_no_iores;
	}
	drv_data->regs = ioremap(res->start, res->end - res->start + 1);
	if (drv_data->regs == NULL) {
		dev_err(&pdev->dev, "probe - cannot map IO\n");
		status = -ENXIO;
		goto err_no_iomap;
	}
	drv_data->rd_data_phys = (dma_addr_t)res->start;

	/* Attach to IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "probe - IRQ resource not defined\n");
		status = -ENODEV;
		goto err_no_irqres;
	}
	status = request_irq(irq, spi_int, IRQF_DISABLED,
				dev_name(dev), drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irqres;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {
		/* Get rx DMA channel */
		drv_data->rx_channel = imx_dma_request_by_prio("spi_imx_rx",
							       DMA_PRIO_HIGH);
		if (drv_data->rx_channel < 0) {
			dev_err(dev,
				"probe - problem (%d) requesting rx channel\n",
				drv_data->rx_channel);
			status = drv_data->rx_channel;
			goto err_no_rxdma;
		} else
			imx_dma_setup_handlers(drv_data->rx_channel, NULL,
						dma_err_handler, drv_data);

		/* Get tx DMA channel */
		drv_data->tx_channel = imx_dma_request_by_prio("spi_imx_tx",
							       DMA_PRIO_MEDIUM);
		if (drv_data->tx_channel < 0) {
			dev_err(dev,
				"probe - problem (%d) requesting tx channel\n",
				drv_data->tx_channel);
			status = drv_data->tx_channel;
			imx_dma_free(drv_data->rx_channel);
			goto err_no_txdma;
		} else
			imx_dma_setup_handlers(drv_data->tx_channel,
						dma_tx_handler,
						dma_err_handler,
						drv_data);

		/* Set request source and burst length for allocated channels */
		switch (drv_data->pdev->id) {
		case 1:
			/* Using SPI1 */
			RSSR(drv_data->rx_channel) = DMA_REQ_SPI1_R;
			RSSR(drv_data->tx_channel) = DMA_REQ_SPI1_T;
			break;
		case 2:
			/* Using SPI2 */
			RSSR(drv_data->rx_channel) = DMA_REQ_SPI2_R;
			RSSR(drv_data->tx_channel) = DMA_REQ_SPI2_T;
			break;
		default:
			dev_err(dev, "probe - bad SPI Id\n");
			imx_dma_free(drv_data->rx_channel);
			imx_dma_free(drv_data->tx_channel);
			status = -ENODEV;
			goto err_no_devid;
		}
		BLR(drv_data->rx_channel) = SPI_DMA_BLR;
		BLR(drv_data->tx_channel) = SPI_DMA_BLR;
	}

	/* Load default SPI configuration */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);
	writel(SPI_DEFAULT_CONTROL, drv_data->regs + SPI_CONTROL);

	/* Initialize and start queue */
	status = init_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem initializing queue\n");
		goto err_init_queue;
	}
	status = start_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem starting queue\n");
		goto err_start_queue;
	}

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem registering spi master\n");
		goto err_spi_register;
	}

	dev_dbg(dev, "probe succeeded\n");
	return 0;

err_init_queue:
err_start_queue:
err_spi_register:
	destroy_queue(drv_data);

err_no_rxdma:
err_no_txdma:
err_no_devid:
	free_irq(irq, drv_data);

err_no_irqres:
	iounmap(drv_data->regs);

err_no_iomap:
	release_resource(drv_data->ioarea);
	kfree(drv_data->ioarea);

err_no_iores:
	clk_disable(drv_data->clk);
	clk_put(drv_data->clk);

err_no_clk:
	spi_master_put(master);

err_no_pdata:
err_no_mem:
	return status;
}

static int __exit spi_imx_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int irq;
	int status = 0;

	if (!drv_data)
		return 0;

	tasklet_kill(&drv_data->pump_transfers);

	/* Remove the queue */
	status = destroy_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "queue remove failed (%d)\n", status);
		return status;
	}

	/* Reset SPI */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);

	/* Release DMA */
	if (drv_data->master_info->enable_dma) {
		RSSR(drv_data->rx_channel) = 0;
		RSSR(drv_data->tx_channel) = 0;
		imx_dma_free(drv_data->tx_channel);
		imx_dma_free(drv_data->rx_channel);
	}

	/* Release IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, drv_data);

	clk_disable(drv_data->clk);
	clk_put(drv_data->clk);

	/* Release map resources */
	iounmap(drv_data->regs);
	release_resource(drv_data->ioarea);
	kfree(drv_data->ioarea);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);
	spi_master_put(drv_data->master);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "remove succeeded\n");

	return 0;
}

static void spi_imx_shutdown(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);

	/* Reset SPI */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);

	dev_dbg(&pdev->dev, "shutdown succeeded\n");
}

#ifdef CONFIG_PM
static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	status = stop_queue(drv_data);
	if (status != 0) {
		dev_warn(&pdev->dev, "suspend cannot stop queue\n");
		return status;
	}

	dev_dbg(&pdev->dev, "suspended\n");

	return 0;
}

static int spi_imx_resume(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	/* Start the queue running */
	status = start_queue(drv_data);
	if (status != 0)
		dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
	else
		dev_dbg(&pdev->dev, "resumed\n");

	return status;
}
#else
#define spi_imx_suspend NULL
#define spi_imx_resume NULL
#endif /* CONFIG_PM */

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:spi_imx");

static struct platform_driver driver = {
	.driver = {
		.name = "spi_imx",
		.owner = THIS_MODULE,
	},
	.remove = __exit_p(spi_imx_remove),
	.shutdown = spi_imx_shutdown,
	.suspend = spi_imx_suspend,
	.resume = spi_imx_resume,
};

static int __init spi_imx_init(void)
{
	return platform_driver_probe(&driver, spi_imx_probe);
}
module_init(spi_imx_init);

static void __exit spi_imx_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(spi_imx_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("iMX SPI Controller Driver");
MODULE_LICENSE("GPL");