/*
 * drivers/spi/spi_imx.c
 *
 * Copyright (C) 2006 SWAPP
 *	Andrea Paterniani <a.paterniani@swapp-eng.it>
 *
 * Initial version inspired by:
 *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>

#include <mach/hardware.h>
#include <mach/imx-dma.h>
#include <mach/spi_imx.h>

/*-------------------------------------------------------------------------*/
/* SPI Registers offsets from peripheral base address */
#define SPI_RXDATA		(0x00)
#define SPI_TXDATA		(0x04)
#define SPI_CONTROL		(0x08)
#define SPI_INT_STATUS		(0x0C)
#define SPI_TEST		(0x10)
#define SPI_PERIOD		(0x14)
#define SPI_DMA			(0x18)
#define SPI_RESET		(0x1C)

/* SPI Control Register Bit Fields & Masks */
#define SPI_CONTROL_BITCOUNT_MASK	(0xF)		/* Bit Count Mask */
#define SPI_CONTROL_BITCOUNT(n)		(((n) - 1) & SPI_CONTROL_BITCOUNT_MASK)
#define SPI_CONTROL_POL			(0x1 << 4)	/* Clock Polarity Mask */
#define SPI_CONTROL_POL_ACT_HIGH	(0x0 << 4)	/* Active high pol. (0=idle) */
#define SPI_CONTROL_POL_ACT_LOW		(0x1 << 4)	/* Active low pol. (1=idle) */
#define SPI_CONTROL_PHA			(0x1 << 5)	/* Clock Phase Mask */
#define SPI_CONTROL_PHA_0		(0x0 << 5)	/* Clock Phase 0 */
#define SPI_CONTROL_PHA_1		(0x1 << 5)	/* Clock Phase 1 */
#define SPI_CONTROL_SSCTL		(0x1 << 6)	/* /SS Waveform Select Mask */
#define SPI_CONTROL_SSCTL_0		(0x0 << 6)	/* Master: /SS stays low between SPI bursts
							   Slave: RXFIFO advanced by BIT_COUNT */
#define SPI_CONTROL_SSCTL_1		(0x1 << 6)	/* Master: /SS insert pulse between SPI bursts
							   Slave: RXFIFO advanced by /SS rising edge */
#define SPI_CONTROL_SSPOL		(0x1 << 7)	/* /SS Polarity Select Mask */
#define SPI_CONTROL_SSPOL_ACT_LOW	(0x0 << 7)	/* /SS Active low */
#define SPI_CONTROL_SSPOL_ACT_HIGH	(0x1 << 7)	/* /SS Active high */
#define SPI_CONTROL_XCH			(0x1 << 8)	/* Exchange */
#define SPI_CONTROL_SPIEN		(0x1 << 9)	/* SPI Module Enable */
#define SPI_CONTROL_MODE		(0x1 << 10)	/* SPI Mode Select Mask */
#define SPI_CONTROL_MODE_SLAVE		(0x0 << 10)	/* SPI Mode Slave */
#define SPI_CONTROL_MODE_MASTER		(0x1 << 10)	/* SPI Mode Master */
#define SPI_CONTROL_DRCTL		(0x3 << 11)	/* /SPI_RDY Control Mask */
#define SPI_CONTROL_DRCTL_0		(0x0 << 11)	/* Ignore /SPI_RDY */
#define SPI_CONTROL_DRCTL_1		(0x1 << 11)	/* /SPI_RDY falling edge triggers input */
#define SPI_CONTROL_DRCTL_2		(0x2 << 11)	/* /SPI_RDY active low level triggers input */
#define SPI_CONTROL_DATARATE		(0x7 << 13)	/* Data Rate Mask */
#define SPI_PERCLK2_DIV_MIN		(0)		/* PERCLK2:4 */
#define SPI_PERCLK2_DIV_MAX		(7)		/* PERCLK2:512 */
#define SPI_CONTROL_DATARATE_MIN	(SPI_PERCLK2_DIV_MAX << 13)
#define SPI_CONTROL_DATARATE_MAX	(SPI_PERCLK2_DIV_MIN << 13)
#define SPI_CONTROL_DATARATE_BAD	(SPI_CONTROL_DATARATE_MIN + 1)
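
/*
 * The data rate field (bits 15..13) selects SCLK = PERCLK2 / (4 << n) for
 * n in 0..7, i.e. divisors from 4 (n = 0) up to 512 (n = 7).  Illustrative
 * example, assuming a 96 MHz PERCLK2: n = 0 gives 24 MHz, n = 7 gives
 * 187.5 kHz.  See spi_speed_hz() and spi_data_rate() below for the
 * conversions in both directions.
 */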

/* SPI Interrupt/Status Register Bit Fields & Masks */
#define SPI_STATUS_TE	(0x1 << 0)	/* TXFIFO Empty Status */
#define SPI_STATUS_TH	(0x1 << 1)	/* TXFIFO Half Status */
#define SPI_STATUS_TF	(0x1 << 2)	/* TXFIFO Full Status */
#define SPI_STATUS_RR	(0x1 << 3)	/* RXFIFO Data Ready Status */
#define SPI_STATUS_RH	(0x1 << 4)	/* RXFIFO Half Status */
#define SPI_STATUS_RF	(0x1 << 5)	/* RXFIFO Full Status */
#define SPI_STATUS_RO	(0x1 << 6)	/* RXFIFO Overflow */
#define SPI_STATUS_BO	(0x1 << 7)	/* Bit Count Overflow */
#define SPI_STATUS	(0xFF)		/* SPI Status Mask */
#define SPI_INTEN_TE	(0x1 << 8)	/* TXFIFO Empty Interrupt Enable */
#define SPI_INTEN_TH	(0x1 << 9)	/* TXFIFO Half Interrupt Enable */
#define SPI_INTEN_TF	(0x1 << 10)	/* TXFIFO Full Interrupt Enable */
#define SPI_INTEN_RE	(0x1 << 11)	/* RXFIFO Data Ready Interrupt Enable */
#define SPI_INTEN_RH	(0x1 << 12)	/* RXFIFO Half Interrupt Enable */
#define SPI_INTEN_RF	(0x1 << 13)	/* RXFIFO Full Interrupt Enable */
#define SPI_INTEN_RO	(0x1 << 14)	/* RXFIFO Overflow Interrupt Enable */
#define SPI_INTEN_BO	(0x1 << 15)	/* Bit Count Overflow Interrupt Enable */
#define SPI_INTEN	(0xFF << 8)	/* SPI Interrupt Enable Mask */

/* SPI Test Register Bit Fields & Masks */
#define SPI_TEST_TXCNT		(0xF << 0)	/* TXFIFO Counter */
#define SPI_TEST_RXCNT_LSB	(4)		/* RXFIFO Counter LSB */
#define SPI_TEST_RXCNT		(0xF << 4)	/* RXFIFO Counter */
#define SPI_TEST_SSTATUS	(0xF << 8)	/* State Machine Status */
#define SPI_TEST_LBC		(0x1 << 14)	/* Loop Back Control */

/* SPI Period Register Bit Fields & Masks */
#define SPI_PERIOD_WAIT		(0x7FFF << 0)	/* Wait Between Transactions */
#define SPI_PERIOD_MAX_WAIT	(0x7FFF)	/* Max Wait Between Transactions */
#define SPI_PERIOD_CSRC		(0x1 << 15)	/* Period Clock Source Mask */
#define SPI_PERIOD_CSRC_BCLK	(0x0 << 15)	/* Period Clock Source is Bit Clock */
#define SPI_PERIOD_CSRC_32768	(0x1 << 15)	/* Period Clock Source is 32.768 kHz Clock */

/* SPI DMA Register Bit Fields & Masks */
#define SPI_DMA_RHDMA	(0x1 << 4)	/* RXFIFO Half Status */
#define SPI_DMA_RFDMA	(0x1 << 5)	/* RXFIFO Full Status */
#define SPI_DMA_TEDMA	(0x1 << 6)	/* TXFIFO Empty Status */
#define SPI_DMA_THDMA	(0x1 << 7)	/* TXFIFO Half Status */
#define SPI_DMA_RHDEN	(0x1 << 12)	/* RXFIFO Half DMA Request Enable */
#define SPI_DMA_RFDEN	(0x1 << 13)	/* RXFIFO Full DMA Request Enable */
#define SPI_DMA_TEDEN	(0x1 << 14)	/* TXFIFO Empty DMA Request Enable */
#define SPI_DMA_THDEN	(0x1 << 15)	/* TXFIFO Half DMA Request Enable */

/* SPI Soft Reset Register Bit Fields & Masks */
#define SPI_RESET_START	(0x1)		/* Start */

/* Default SPI configuration values */
#define SPI_DEFAULT_CONTROL		\
(					\
	SPI_CONTROL_BITCOUNT(16) |	\
	SPI_CONTROL_POL_ACT_HIGH |	\
	SPI_CONTROL_PHA_0 |		\
	SPI_CONTROL_SPIEN |		\
	SPI_CONTROL_SSCTL_1 |		\
	SPI_CONTROL_MODE_MASTER |	\
	SPI_CONTROL_DRCTL_0 |		\
	SPI_CONTROL_DATARATE_MIN	\
)
#define SPI_DEFAULT_ENABLE_LOOPBACK	(0)
#define SPI_DEFAULT_ENABLE_DMA		(0)
#define SPI_DEFAULT_PERIOD_WAIT		(8)
/*-------------------------------------------------------------------------*/

/*-------------------------------------------------------------------------*/
/* TX/RX SPI FIFO size */
#define SPI_FIFO_DEPTH			(8)
#define SPI_FIFO_BYTE_WIDTH		(2)
#define SPI_FIFO_OVERFLOW_MARGIN	(2)

/* DMA burst length for half full/empty request trigger */
#define SPI_DMA_BLR	(SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2)
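
/*
 * With an 8-entry FIFO of 16-bit words, SPI_DMA_BLR works out to
 * 8 * 2 / 2 = 8 bytes per DMA burst, i.e. exactly half the FIFO.  This
 * matches the half-full/half-empty request thresholds (SPI_DMA_RHDEN /
 * SPI_DMA_THDEN) programmed in pump_transfers().
 */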

/* Dummy char output to achieve reads.
   Choosing something different from all zeroes may help pattern recognition
   for oscilloscope analysis, but may break some drivers. */
#define SPI_DUMMY_u8	0
#define SPI_DUMMY_u16	((SPI_DUMMY_u8 << 8) | SPI_DUMMY_u8)
#define SPI_DUMMY_u32	((SPI_DUMMY_u16 << 16) | SPI_DUMMY_u16)

/**
 * Macro to change a u32 field:
 * @r : register to edit
 * @m : bit mask
 * @v : new value for the field, correctly bit-aligned
 */
#define u32_EDIT(r, m, v)	r = (r & ~(m)) | (v)
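
/*
 * Example: u32_EDIT(control, SPI_CONTROL_DATARATE, SPI_CONTROL_DATARATE_MIN)
 * clears bits 15..13 of 'control' and ORs in the new divider value.  Note
 * that @v must already be shifted into the field position; the macro does
 * not shift it.
 */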

/* Message state */
#define START_STATE	((void *)0)
#define RUNNING_STATE	((void *)1)
#define DONE_STATE	((void *)2)
#define ERROR_STATE	((void *)-1)
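
/*
 * Message states are stored as opaque pointer values in spi_message.state:
 * pump_messages() sets START_STATE, pump_transfers() advances to
 * RUNNING_STATE, and next_transfer() returns DONE_STATE (or the interrupt
 * handlers set ERROR_STATE) to terminate the message.
 */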

/* Queue state */
#define QUEUE_RUNNING	(0)
#define QUEUE_STOPPED	(1)

#define IS_DMA_ALIGNED(x)	(((u32)(x) & 0x03) == 0)
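
/*
 * DMA buffers must be 32-bit aligned because the DMA channels are
 * programmed with 32-bit memory-side accesses (CCR_SSIZ_32/CCR_DSIZ_32 in
 * pump_transfers()); map_dma_buffers() rejects unaligned buffers, which
 * makes the transfer fall back to PIO.
 */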
/*-------------------------------------------------------------------------*/

/*-------------------------------------------------------------------------*/
/* Driver data structs */

/* Context */
struct driver_data {
	/* Driver model hookup */
	struct platform_device *pdev;

	/* SPI framework hookup */
	struct spi_master *master;

	/* IMX hookup */
	struct spi_imx_master *master_info;

	/* Memory resources and SPI regs virtual address */
	struct resource *ioarea;
	void __iomem *regs;

	/* SPI RX_DATA physical address */
	dma_addr_t rd_data_phys;

	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct work;
	spinlock_t lock;
	struct list_head queue;
	int busy;
	int run;

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message, transfer and state */
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;

	/* Rd / Wr buffers pointers */
	size_t len;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;

	u8 rd_only;
	u8 n_bytes;
	int cs_change;

	/* Function pointers */
	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
	void (*cs_control)(u32 command);

	/* DMA setup */
	int rx_channel;
	int tx_channel;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	int rx_dma_needs_unmap;
	int tx_dma_needs_unmap;
	size_t tx_map_len;
	u32 dummy_dma_buf ____cacheline_aligned;

	struct clk *clk;
};

/* Runtime state */
struct chip_data {
	u32 control;
	u32 period;
	u32 test;

	u8 enable_dma:1;
	u8 bits_per_word;
	u8 n_bytes;
	u32 max_speed_hz;

	void (*cs_control)(u32 command);
};
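
/*
 * chip_data caches the per-device register images (control, period, test)
 * built by setup(); restore_state() simply reloads them into the hardware
 * at the start of every message, so nothing needs to be recomputed in the
 * message pump hot path.
 */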

/*-------------------------------------------------------------------------*/

static void pump_messages(struct work_struct *work);

static void flush(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	u32 control;

	dev_dbg(&drv_data->pdev->dev, "flush\n");

	/* Wait for end of transaction */
	do {
		control = readl(regs + SPI_CONTROL);
	} while (control & SPI_CONTROL_XCH);

	/* Release chip select if requested, transfer delays are
	   handled in pump_transfers */
	if (drv_data->cs_change)
		drv_data->cs_control(SPI_CS_DEASSERT);

	/* Disable SPI to flush FIFOs */
	writel(control & ~SPI_CONTROL_SPIEN, regs + SPI_CONTROL);
	writel(control, regs + SPI_CONTROL);
}

static void restore_state(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	struct chip_data *chip = drv_data->cur_chip;

	/* Load chip registers */
	dev_dbg(&drv_data->pdev->dev,
		"restore_state\n"
		"    test    = 0x%08X\n"
		"    control = 0x%08X\n",
		chip->test,
		chip->control);
	writel(chip->test, regs + SPI_TEST);
	writel(chip->period, regs + SPI_PERIOD);
	writel(0, regs + SPI_INT_STATUS);
	writel(chip->control, regs + SPI_CONTROL);
}

static void null_cs_control(u32 command)
{
}

static inline u32 data_to_write(struct driver_data *drv_data)
{
	return ((u32)(drv_data->tx_end - drv_data->tx)) / drv_data->n_bytes;
}

static inline u32 data_to_read(struct driver_data *drv_data)
{
	return ((u32)(drv_data->rx_end - drv_data->rx)) / drv_data->n_bytes;
}

static int write(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	void *tx = drv_data->tx;
	void *tx_end = drv_data->tx_end;
	u8 n_bytes = drv_data->n_bytes;
	u32 remaining_writes;
	u32 fifo_avail_space;
	u32 n;
	u16 d;

	/* Compute how many fifo writes to do */
	remaining_writes = (u32)(tx_end - tx) / n_bytes;
	fifo_avail_space = SPI_FIFO_DEPTH -
			(readl(regs + SPI_TEST) & SPI_TEST_TXCNT);
	if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN))
		/* Fix misunderstood receive overflow */
		fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN;
	n = min(remaining_writes, fifo_avail_space);

	dev_dbg(&drv_data->pdev->dev,
		"write type %s\n"
		"    remaining writes = %d\n"
		"    fifo avail space = %d\n"
		"    fifo writes      = %d\n",
		(n_bytes == 1) ? "u8" : "u16",
		remaining_writes,
		fifo_avail_space,
		n);

	if (n > 0) {
		/* Fill SPI TXFIFO */
		if (drv_data->rd_only) {
			tx += n * n_bytes;
			while (n--)
				writel(SPI_DUMMY_u16, regs + SPI_TXDATA);
		} else {
			if (n_bytes == 1) {
				while (n--) {
					d = *(u8 *)tx;
					writel(d, regs + SPI_TXDATA);
					tx += 1;
				}
			} else {
				while (n--) {
					d = *(u16 *)tx;
					writel(d, regs + SPI_TXDATA);
					tx += 2;
				}
			}
		}

		/* Trigger transfer */
		writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
			regs + SPI_CONTROL);

		/* Update tx pointer */
		drv_data->tx = tx;
	}

	return (tx >= tx_end);
}

static int read(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	void *rx = drv_data->rx;
	void *rx_end = drv_data->rx_end;
	u8 n_bytes = drv_data->n_bytes;
	u32 remaining_reads;
	u32 fifo_rxcnt;
	u32 n;
	u16 d;

	/* Compute how many fifo reads to do */
	remaining_reads = (u32)(rx_end - rx) / n_bytes;
	fifo_rxcnt = (readl(regs + SPI_TEST) & SPI_TEST_RXCNT) >>
			SPI_TEST_RXCNT_LSB;
	n = min(remaining_reads, fifo_rxcnt);

	dev_dbg(&drv_data->pdev->dev,
		"read type %s\n"
		"    remaining reads = %d\n"
		"    fifo rx count   = %d\n"
		"    fifo reads      = %d\n",
		(n_bytes == 1) ? "u8" : "u16",
		remaining_reads,
		fifo_rxcnt,
		n);

	if (n > 0) {
		/* Read SPI RXFIFO */
		if (n_bytes == 1) {
			while (n--) {
				d = readl(regs + SPI_RXDATA);
				*((u8 *)rx) = d;
				rx += 1;
			}
		} else {
			while (n--) {
				d = readl(regs + SPI_RXDATA);
				*((u16 *)rx) = d;
				rx += 2;
			}
		}

		/* Update rx pointer */
		drv_data->rx = rx;
	}

	return (rx >= rx_end);
}

static void *next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	}

	return DONE_STATE;
}

static int map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg;
	struct device *dev;
	void *buf;

	drv_data->rx_dma_needs_unmap = 0;
	drv_data->tx_dma_needs_unmap = 0;

	if (!drv_data->master_info->enable_dma ||
		!drv_data->cur_chip->enable_dma)
			return -1;

	msg = drv_data->cur_msg;
	dev = &msg->spi->dev;
	if (msg->is_dma_mapped) {
		if (drv_data->tx_dma)
			/* The caller provided at least dma and cpu virtual
			   address for write; pump_transfers() will consider
			   the transfer as write only if cpu rx virtual
			   address is NULL */
			return 0;

		if (drv_data->rx_dma) {
			/* The caller provided dma and cpu virtual address to
			   perform a read only transfer -->
			   use drv_data->dummy_dma_buf for dummy writes to
			   achieve reads */
			buf = &drv_data->dummy_dma_buf;
			drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
			drv_data->tx_dma = dma_map_single(dev,
							buf,
							drv_data->tx_map_len,
							DMA_TO_DEVICE);
			if (dma_mapping_error(dev, drv_data->tx_dma))
				return -1;
			drv_data->tx_dma_needs_unmap = 1;

			/* Flags transfer as rd_only for pump_transfers() DMA
			   regs programming (should be redundant) */
			drv_data->tx = NULL;

			return 0;
		}
	}

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return -1;

	/* NULL rx means write-only transfer and no map needed
	   since rx DMA will not be used */
	if (drv_data->rx) {
		buf = drv_data->rx;
		drv_data->rx_dma = dma_map_single(
					dev,
					buf,
					drv_data->len,
					DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, drv_data->rx_dma))
			return -1;
		drv_data->rx_dma_needs_unmap = 1;
	}

	if (drv_data->tx == NULL) {
		/* Read only message --> use drv_data->dummy_dma_buf for dummy
		   writes to achieve reads */
		buf = &drv_data->dummy_dma_buf;
		drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
	} else {
		buf = drv_data->tx;
		drv_data->tx_map_len = drv_data->len;
	}
	drv_data->tx_dma = dma_map_single(dev,
					buf,
					drv_data->tx_map_len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma)) {
		if (drv_data->rx_dma) {
			dma_unmap_single(dev,
					drv_data->rx_dma,
					drv_data->len,
					DMA_FROM_DEVICE);
			drv_data->rx_dma_needs_unmap = 0;
		}
		return -1;
	}
	drv_data->tx_dma_needs_unmap = 1;

	return 0;
}

static void unmap_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (drv_data->rx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->rx_dma,
				drv_data->len,
				DMA_FROM_DEVICE);
		drv_data->rx_dma_needs_unmap = 0;
	}
	if (drv_data->tx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->tx_dma,
				drv_data->tx_map_len,
				DMA_TO_DEVICE);
		drv_data->tx_dma_needs_unmap = 0;
	}
}

/* Caller already set message->status (dma is already blocked) */
static void giveback(struct spi_message *message, struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;

	/* Bring SPI to sleep; restore_state() and pump_transfer()
	   will do new setup */
	writel(0, regs + SPI_INT_STATUS);
	writel(0, regs + SPI_DMA);

	/* Unconditional deselect */
	drv_data->cs_control(SPI_CS_DEASSERT);

	message->state = NULL;
	if (message->complete)
		message->complete(message->context);

	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	queue_work(drv_data->workqueue, &drv_data->work);
}

static void dma_err_handler(int channel, void *data, int errcode)
{
	struct driver_data *drv_data = data;
	struct spi_message *msg = drv_data->cur_msg;

	dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n");

	/* Disable both rx and tx dma channels */
	imx_dma_disable(drv_data->rx_channel);
	imx_dma_disable(drv_data->tx_channel);
	unmap_dma_buffers(drv_data);

	flush(drv_data);
	msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_tx_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;

	dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n");

	imx_dma_disable(channel);

	/* Now wait for TXFIFO empty */
	writel(SPI_INTEN_TE, drv_data->regs + SPI_INT_STATUS);
}

static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
	u32 status;
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;

	status = readl(regs + SPI_INT_STATUS);

	if ((status & (SPI_INTEN_RO | SPI_STATUS_RO))
			== (SPI_INTEN_RO | SPI_STATUS_RO)) {
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);

		imx_dma_disable(drv_data->tx_channel);
		imx_dma_disable(drv_data->rx_channel);
		unmap_dma_buffers(drv_data);

		flush(drv_data);

		dev_warn(&drv_data->pdev->dev,
			"dma_transfer - fifo overrun\n");

		msg->state = ERROR_STATE;
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	}

	if (status & SPI_STATUS_TE) {
		writel(status & ~SPI_INTEN_TE, regs + SPI_INT_STATUS);

		if (drv_data->rx) {
			/* Wait end of transfer before reading trailing data */
			while (readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH)
				cpu_relax();

			imx_dma_disable(drv_data->rx_channel);
			unmap_dma_buffers(drv_data);

			/* Release chip select if requested, transfer delays
			   are handled in pump_transfers() */
			if (drv_data->cs_change)
				drv_data->cs_control(SPI_CS_DEASSERT);

			/* Calculate number of trailing data and read them */
			dev_dbg(&drv_data->pdev->dev,
				"dma_transfer - test = 0x%08X\n",
				readl(regs + SPI_TEST));
			drv_data->rx = drv_data->rx_end -
					((readl(regs + SPI_TEST) &
					SPI_TEST_RXCNT) >>
					SPI_TEST_RXCNT_LSB) * drv_data->n_bytes;
			read(drv_data);
		} else {
			/* Write only transfer */
			unmap_dma_buffers(drv_data);

			flush(drv_data);
		}

		/* End of transfer, update total bytes transferred */
		msg->actual_length += drv_data->len;

		/* Move to next transfer */
		msg->state = next_transfer(drv_data);

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;
	u32 status;
	irqreturn_t handled = IRQ_NONE;

	status = readl(regs + SPI_INT_STATUS);

	if (status & SPI_INTEN_TE) {
		/* TXFIFO Empty Interrupt on the last transferred word */
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
		dev_dbg(&drv_data->pdev->dev,
			"interrupt_wronly_transfer - end of tx\n");

		flush(drv_data);

		/* Update total bytes transferred */
		msg->actual_length += drv_data->len;

		/* Move to next transfer */
		msg->state = next_transfer(drv_data);

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	} else {
		while (status & SPI_STATUS_TH) {
			dev_dbg(&drv_data->pdev->dev,
				"interrupt_wronly_transfer - status = 0x%08X\n",
				status);

			/* Pump data */
			if (write(drv_data)) {
				/* End of TXFIFO writes,
				   now wait until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				return IRQ_HANDLED;
			}

			status = readl(regs + SPI_INT_STATUS);

			/* We did something */
			handled = IRQ_HANDLED;
		}
	}

	return handled;
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;
	u32 status, control;
	irqreturn_t handled = IRQ_NONE;
	unsigned long limit;

	status = readl(regs + SPI_INT_STATUS);

	if (status & SPI_INTEN_TE) {
		/* TXFIFO Empty Interrupt on the last transferred word */
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
		dev_dbg(&drv_data->pdev->dev,
			"interrupt_transfer - end of tx\n");

		if (msg->state == ERROR_STATE) {
			/* RXFIFO overrun was detected and message aborted */
			flush(drv_data);
		} else {
			/* Wait for end of transaction */
			do {
				control = readl(regs + SPI_CONTROL);
			} while (control & SPI_CONTROL_XCH);

			/* Release chip select if requested, transfer delays
			   are handled in pump_transfers */
			if (drv_data->cs_change)
				drv_data->cs_control(SPI_CS_DEASSERT);

			/* Read trailing bytes (pre-decrement so limit == 0
			   reliably flags a timeout) */
			limit = loops_per_jiffy << 1;
			while ((read(drv_data) == 0) && --limit)
				;
			if (limit == 0)
				dev_err(&drv_data->pdev->dev,
					"interrupt_transfer - "
					"trailing byte read failed\n");
			else
				dev_dbg(&drv_data->pdev->dev,
					"interrupt_transfer - end of rx\n");

			/* Update total bytes transferred */
			msg->actual_length += drv_data->len;

			/* Move to next transfer */
			msg->state = next_transfer(drv_data);
		}

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	} else {
		while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
			dev_dbg(&drv_data->pdev->dev,
				"interrupt_transfer - status = 0x%08X\n",
				status);

			if (status & SPI_STATUS_RO) {
				/* RXFIFO overrun, abort message and wait
				   until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				dev_warn(&drv_data->pdev->dev,
					"interrupt_transfer - fifo overrun\n"
					"    data not yet written = %d\n"
					"    data not yet read    = %d\n",
					data_to_write(drv_data),
					data_to_read(drv_data));

				msg->state = ERROR_STATE;

				return IRQ_HANDLED;
			}

			/* Pump data */
			read(drv_data);
			if (write(drv_data)) {
				/* End of TXFIFO writes,
				   now wait until TXFIFO is empty */
				writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
				return IRQ_HANDLED;
			}

			status = readl(regs + SPI_INT_STATUS);

			/* We did something */
			handled = IRQ_HANDLED;
		}
	}

	return handled;
}

static irqreturn_t spi_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = (struct driver_data *)dev_id;

	if (!drv_data->cur_msg) {
		dev_err(&drv_data->pdev->dev,
			"spi_int - bad message state\n");
		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

static inline u32 spi_speed_hz(struct driver_data *drv_data, u32 data_rate)
{
	return clk_get_rate(drv_data->clk) / (4 << ((data_rate) >> 13));
}

static u32 spi_data_rate(struct driver_data *drv_data, u32 speed_hz)
{
	u32 div;
	u32 quantized_hz = clk_get_rate(drv_data->clk) >> 2;

	for (div = SPI_PERCLK2_DIV_MIN;
		div <= SPI_PERCLK2_DIV_MAX;
		div++, quantized_hz >>= 1) {
			if (quantized_hz <= speed_hz)
				/* Highest available speed not exceeding the
				   requested speed */
				return div << 13;
	}
	return SPI_CONTROL_DATARATE_BAD;
}
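
/*
 * spi_data_rate() quantizes downwards: it returns the smallest divider
 * whose resulting rate does not exceed the requested speed.  Illustrative
 * example, assuming a 96 MHz PERCLK2: requesting 10 MHz walks 24 MHz
 * (div 0), 12 MHz (div 1), 6 MHz (div 2) and settles on div 2, so
 * spi_speed_hz() then reports an actual rate of 6 MHz.
 */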

static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message;
	struct spi_transfer *transfer, *previous;
	struct chip_data *chip;
	void __iomem *regs;
	u32 tmp, control;

	dev_dbg(&drv_data->pdev->dev, "pump_transfer\n");

	message = drv_data->cur_msg;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(message, drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(message, drv_data);
		return;
	}

	chip = drv_data->cur_chip;

	/* Delay if requested at end of transfer */
	transfer = drv_data->cur_transfer;
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	} else {
		/* START_STATE */
		message->state = RUNNING_STATE;
		drv_data->cs_control = chip->cs_control;
	}

	transfer = drv_data->cur_transfer;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len;
	drv_data->cs_change = transfer->cs_change;
	drv_data->rd_only = (drv_data->tx == NULL);

	regs = drv_data->regs;
	control = readl(regs + SPI_CONTROL);

	/* Bits per word setup */
	tmp = transfer->bits_per_word;
	if (tmp == 0) {
		/* Use device setup */
		tmp = chip->bits_per_word;
		drv_data->n_bytes = chip->n_bytes;
	} else
		/* Use per-transfer setup */
		drv_data->n_bytes = (tmp <= 8) ? 1 : 2;
	u32_EDIT(control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);

	/* Speed setup (surely valid because already checked) */
	tmp = transfer->speed_hz;
	if (tmp == 0)
		tmp = chip->max_speed_hz;
	tmp = spi_data_rate(drv_data, tmp);
	u32_EDIT(control, SPI_CONTROL_DATARATE, tmp);

	writel(control, regs + SPI_CONTROL);

	/* Assert device chip-select */
	drv_data->cs_control(SPI_CS_ASSERT);

	/* DMA cannot read/write SPI FIFOs other than 16 bits at a time; hence
	   if bits_per_word is less or equal 8, PIO transfers are performed.
	   Moreover DMA is convenient only for transfers longer than the FIFO
	   size in bytes. */
	if ((drv_data->n_bytes == 2) &&
		(drv_data->len > SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH) &&
		(map_dma_buffers(drv_data) == 0)) {
		dev_dbg(&drv_data->pdev->dev,
			"pump dma transfer\n"
			"    tx     = %p\n"
			"    tx_dma = %08X\n"
			"    rx     = %p\n"
			"    rx_dma = %08X\n"
			"    len    = %d\n",
			drv_data->tx,
			(unsigned int)drv_data->tx_dma,
			drv_data->rx,
			(unsigned int)drv_data->rx_dma,
			drv_data->len);

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = dma_transfer;

		/* Trigger transfer */
		writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
			regs + SPI_CONTROL);

		/* Setup tx DMA */
		if (drv_data->tx)
			/* Linear source address */
			CCR(drv_data->tx_channel) =
				CCR_DMOD_FIFO |
				CCR_SMOD_LINEAR |
				CCR_SSIZ_32 | CCR_DSIZ_16 |
				CCR_REN;
		else
			/* Read only transfer -> fixed source address for
			   dummy write to achieve read */
			CCR(drv_data->tx_channel) =
				CCR_DMOD_FIFO |
				CCR_SMOD_FIFO |
				CCR_SSIZ_32 | CCR_DSIZ_16 |
				CCR_REN;

		imx_dma_setup_single(
			drv_data->tx_channel,
			drv_data->tx_dma,
			drv_data->len,
			drv_data->rd_data_phys + 4,
			DMA_MODE_WRITE);

		if (drv_data->rx) {
			/* Setup rx DMA for linear destination address */
			CCR(drv_data->rx_channel) =
				CCR_DMOD_LINEAR |
				CCR_SMOD_FIFO |
				CCR_DSIZ_32 | CCR_SSIZ_16 |
				CCR_REN;
			imx_dma_setup_single(
				drv_data->rx_channel,
				drv_data->rx_dma,
				drv_data->len,
				drv_data->rd_data_phys,
				DMA_MODE_READ);
			imx_dma_enable(drv_data->rx_channel);

			/* Enable SPI interrupt */
			writel(SPI_INTEN_RO, regs + SPI_INT_STATUS);

			/* Set SPI to request DMA service on both
			   Rx and Tx half fifo watermark */
			writel(SPI_DMA_RHDEN | SPI_DMA_THDEN, regs + SPI_DMA);
		} else
			/* Write only access -> set SPI to request DMA
			   service on Tx half fifo watermark */
			writel(SPI_DMA_THDEN, regs + SPI_DMA);

		imx_dma_enable(drv_data->tx_channel);
	} else {
		dev_dbg(&drv_data->pdev->dev,
			"pump pio transfer\n"
			"    tx  = %p\n"
			"    rx  = %p\n"
			"    len = %d\n",
			drv_data->tx,
			drv_data->rx,
			drv_data->len);

		/* Ensure we have the correct interrupt handler */
		if (drv_data->rx)
			drv_data->transfer_handler = interrupt_transfer;
		else
			drv_data->transfer_handler = interrupt_wronly_transfer;

		/* Enable SPI interrupt */
		if (drv_data->rx)
			writel(SPI_INTEN_TH | SPI_INTEN_RO,
				regs + SPI_INT_STATUS);
		else
			writel(SPI_INTEN_TH, regs + SPI_INT_STATUS);
	}
}

static void pump_messages(struct work_struct *work)
{
	struct driver_data *drv_data =
		container_of(work, struct driver_data, work);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
		drv_data->busy = 0;
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Extract head of queue */
	drv_data->cur_msg = list_entry(drv_data->queue.next,
					struct spi_message, queue);
	list_del_init(&drv_data->cur_msg->queue);
	drv_data->busy = 1;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* Setup the SPI using the per chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
	restore_state(drv_data);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
}

static int transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	u32 min_speed_hz, max_speed_hz, tmp;
	struct spi_transfer *trans;
	unsigned long flags;

	msg->actual_length = 0;

	/* Per transfer setup check */
	min_speed_hz = spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN);
	max_speed_hz = spi->max_speed_hz;
	list_for_each_entry(trans, &msg->transfers, transfer_list) {
		tmp = trans->bits_per_word;
		if (tmp > 16) {
			dev_err(&drv_data->pdev->dev,
				"message rejected : "
				"invalid transfer bits_per_word (%d bits)\n",
				tmp);
			goto msg_rejected;
		}
		tmp = trans->speed_hz;
		if (tmp) {
			if (tmp < min_speed_hz) {
				dev_err(&drv_data->pdev->dev,
					"message rejected : "
					"device min speed (%d Hz) exceeds "
					"required transfer speed (%d Hz)\n",
					min_speed_hz,
					tmp);
				goto msg_rejected;
			} else if (tmp > max_speed_hz) {
				dev_err(&drv_data->pdev->dev,
					"message rejected : "
					"transfer speed (%d Hz) exceeds "
					"device max speed (%d Hz)\n",
					tmp,
					max_speed_hz);
				goto msg_rejected;
			}
		}
	}

	/* Message accepted */
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	spin_lock_irqsave(&drv_data->lock, flags);
	if (drv_data->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -ESHUTDOWN;
	}

	list_add_tail(&msg->queue, &drv_data->queue);
	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->work);
	spin_unlock_irqrestore(&drv_data->lock, flags);

	return 0;

msg_rejected:
	/* Message rejected and not queued */
	msg->status = -EINVAL;
	msg->state = ERROR_STATE;
	if (msg->complete)
		msg->complete(msg->context);
	return -EINVAL;
}
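
/*
 * Note the three-stage pipeline: transfer() only validates and queues the
 * message; pump_messages() (workqueue) dequeues it and restores the chip
 * state; pump_transfers() (tasklet) and the interrupt handlers then move
 * the individual transfers.
 */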

/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)

/* On first setup, bad values must free the chip_data memory since they will
   cause spi_new_device() to fail.  Bad values set later by the protocol
   driver are simply not applied, and the calling driver is notified. */
static int setup(struct spi_device *spi)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	struct spi_imx_chip *chip_info;
	struct chip_data *chip;
	int first_setup = 0;
	u32 tmp;
	int status = 0;

	if (spi->mode & ~MODEBITS) {
		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
			spi->mode & ~MODEBITS);
		return -EINVAL;
	}

	/* Get controller data */
	chip_info = spi->controller_data;

	/* Get controller_state */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		first_setup = 1;

		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"setup - cannot allocate controller state\n");
			return -ENOMEM;
		}
		chip->control = SPI_DEFAULT_CONTROL;

		if (chip_info == NULL) {
			/* spi_board_info.controller_data is not supplied */
			chip_info = kzalloc(sizeof(struct spi_imx_chip),
						GFP_KERNEL);
			if (!chip_info) {
				dev_err(&spi->dev,
					"setup - "
					"cannot allocate controller data\n");
				status = -ENOMEM;
				goto err_first_setup;
			}
			/* Set controller data default value */
			chip_info->enable_loopback =
						SPI_DEFAULT_ENABLE_LOOPBACK;
			chip_info->enable_dma = SPI_DEFAULT_ENABLE_DMA;
			chip_info->ins_ss_pulse = 1;
			chip_info->bclk_wait = SPI_DEFAULT_PERIOD_WAIT;
			chip_info->cs_control = null_cs_control;
		}
	}

	/* Now set controller state based on controller data */

	if (first_setup) {
		/* SPI loopback */
		if (chip_info->enable_loopback)
			chip->test = SPI_TEST_LBC;
		else
			chip->test = 0;

		/* SPI dma driven */
		chip->enable_dma = chip_info->enable_dma;

		/* SPI /SS pulse between spi burst */
		if (chip_info->ins_ss_pulse)
			u32_EDIT(chip->control,
				SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_1);
		else
			u32_EDIT(chip->control,
				SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_0);

		/* SPI bclk waits between each bits_per_word spi burst */
		if (chip_info->bclk_wait > SPI_PERIOD_MAX_WAIT) {
			dev_err(&spi->dev,
				"setup - "
				"bclk_wait exceeds max allowed (%d)\n",
				SPI_PERIOD_MAX_WAIT);
			status = -EINVAL;
			goto err_first_setup;
		}
		chip->period = SPI_PERIOD_CSRC_BCLK |
				(chip_info->bclk_wait & SPI_PERIOD_WAIT);
	}

	/* SPI mode */
	tmp = spi->mode;
	if (tmp & SPI_CS_HIGH) {
		u32_EDIT(chip->control,
			SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
	}
	switch (tmp & SPI_MODE_3) {
	case SPI_MODE_0:
		tmp = 0;
		break;
	case SPI_MODE_1:
		tmp = SPI_CONTROL_PHA_1;
		break;
	case SPI_MODE_2:
		tmp = SPI_CONTROL_POL_ACT_LOW;
		break;
	default:
		/* SPI_MODE_3 */
		tmp = SPI_CONTROL_PHA_1 | SPI_CONTROL_POL_ACT_LOW;
		break;
	}
	u32_EDIT(chip->control, SPI_CONTROL_POL | SPI_CONTROL_PHA, tmp);

	/* SPI word width */
	tmp = spi->bits_per_word;
	if (tmp == 0) {
		tmp = 8;
		spi->bits_per_word = 8;
	} else if (tmp > 16) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"setup - "
			"invalid bits_per_word (%d)\n",
			tmp);
		if (first_setup)
			goto err_first_setup;
		else {
			/* Undo setup using chip as backup copy */
			tmp = chip->bits_per_word;
			spi->bits_per_word = tmp;
		}
	}
	chip->bits_per_word = tmp;
	u32_EDIT(chip->control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
	chip->n_bytes = (tmp <= 8) ? 1 : 2;

	/* SPI datarate */
	tmp = spi_data_rate(drv_data, spi->max_speed_hz);
	if (tmp == SPI_CONTROL_DATARATE_BAD) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"setup - "
			"HW min speed (%d Hz) exceeds required "
			"max speed (%d Hz)\n",
			spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
			spi->max_speed_hz);
		if (first_setup)
			goto err_first_setup;
		else
			/* Undo setup using chip as backup copy */
			spi->max_speed_hz = chip->max_speed_hz;
	} else {
		u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp);
		/* Actual rounded max_speed_hz */
		tmp = spi_speed_hz(drv_data, tmp);
		spi->max_speed_hz = tmp;
		chip->max_speed_hz = tmp;
	}

	/* SPI chip-select management */
	if (chip_info->cs_control)
		chip->cs_control = chip_info->cs_control;
	else
		chip->cs_control = null_cs_control;

	/* Save controller_state */
	spi_set_ctldata(spi, chip);

	/* Summary */
	dev_dbg(&spi->dev,
		"setup succeeded\n"
		"    loopback enable   = %s\n"
		"    dma enable        = %s\n"
		"    insert /ss pulse  = %s\n"
		"    period wait       = %d\n"
		"    mode              = %d\n"
		"    bits per word     = %d\n"
		"    min speed         = %d Hz\n"
		"    rounded max speed = %d Hz\n",
		chip->test & SPI_TEST_LBC ? "Yes" : "No",
		chip->enable_dma ? "Yes" : "No",
		chip->control & SPI_CONTROL_SSCTL ? "Yes" : "No",
		chip->period & SPI_PERIOD_WAIT,
		spi->mode,
		spi->bits_per_word,
		spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
		spi->max_speed_hz);
	return status;

err_first_setup:
	kfree(chip);
	return status;
}

static void cleanup(struct spi_device *spi)
{
	kfree(spi_get_ctldata(spi));
}

static int __init init_queue(struct driver_data *drv_data)
{
	INIT_LIST_HEAD(&drv_data->queue);
	spin_lock_init(&drv_data->lock);

	drv_data->run = QUEUE_STOPPED;
	drv_data->busy = 0;

	tasklet_init(&drv_data->pump_transfers,
			pump_transfers, (unsigned long)drv_data);

	INIT_WORK(&drv_data->work, pump_messages);
	drv_data->workqueue = create_singlethread_workqueue(
				drv_data->master->dev.parent->bus_id);
	if (drv_data->workqueue == NULL)
		return -EBUSY;

	return 0;
}

static int start_queue(struct driver_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -EBUSY;
	}

	drv_data->run = QUEUE_RUNNING;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	queue_work(drv_data->workqueue, &drv_data->work);

	return 0;
}

static int stop_queue(struct driver_data *drv_data)
{
	unsigned long flags;
	unsigned limit = 500;
	int status = 0;

	spin_lock_irqsave(&drv_data->lock, flags);

	/* This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the drv_data->busy could be used, but then the
	 * common execution path (pump_messages) would be required to call
	 * wake_up or friends on every SPI message. Do this instead */
	drv_data->run = QUEUE_STOPPED;
	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		msleep(10);
		spin_lock_irqsave(&drv_data->lock, flags);
	}

	if (!list_empty(&drv_data->queue) || drv_data->busy)
		status = -EBUSY;

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return status;
}

static int destroy_queue(struct driver_data *drv_data)
{
	int status;

	status = stop_queue(drv_data);
	if (status != 0)
		return status;

	if (drv_data->workqueue)
		destroy_workqueue(drv_data->workqueue);

	return 0;
}

static int __init spi_imx_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_imx_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data = NULL;
	struct resource *res;
	int irq, status = 0;

	platform_info = dev->platform_data;
	if (platform_info == NULL) {
		dev_err(&pdev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for drv_data */
	master = spi_alloc_master(dev, sizeof(struct driver_data));
	if (!master) {
		dev_err(&pdev->dev, "probe - cannot alloc spi_master\n");
		status = -ENOMEM;
		goto err_no_mem;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;

	/* Get and enable the peripheral clock; drv_data must be allocated
	   before it can hold the clk handle */
	drv_data->clk = clk_get(&pdev->dev, "perclk2");
	if (IS_ERR(drv_data->clk)) {
		dev_err(&pdev->dev, "probe - cannot get clock\n");
		status = PTR_ERR(drv_data->clk);
		goto err_no_clk;
	}
	clk_enable(drv_data->clk);

	master->bus_num = pdev->id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer = transfer;

	drv_data->dummy_dma_buf = SPI_DUMMY_u32;

	/* Find and map resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "probe - MEM resources not defined\n");
		status = -ENODEV;
		goto err_no_iores;
	}
	drv_data->ioarea = request_mem_region(res->start,
						res->end - res->start + 1,
						pdev->name);
	if (drv_data->ioarea == NULL) {
		dev_err(&pdev->dev, "probe - cannot reserve region\n");
		status = -ENXIO;
		goto err_no_iores;
	}
	drv_data->regs = ioremap(res->start, res->end - res->start + 1);
	if (drv_data->regs == NULL) {
		dev_err(&pdev->dev, "probe - cannot map IO\n");
		status = -ENXIO;
		goto err_no_iomap;
	}
	drv_data->rd_data_phys = (dma_addr_t)res->start;

	/* Attach to IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "probe - IRQ resource not defined\n");
		status = -ENODEV;
		goto err_no_irqres;
	}
	status = request_irq(irq, spi_int, IRQF_DISABLED,
				dev->bus_id, drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irqres;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {
		/* Get rx DMA channel */
		drv_data->rx_channel = imx_dma_request_by_prio("spi_imx_rx",
							DMA_PRIO_HIGH);
		if (drv_data->rx_channel < 0) {
			dev_err(dev,
				"probe - problem (%d) requesting rx channel\n",
				drv_data->rx_channel);
			status = drv_data->rx_channel;
			goto err_no_rxdma;
		} else
			imx_dma_setup_handlers(drv_data->rx_channel, NULL,
						dma_err_handler, drv_data);

		/* Get tx DMA channel */
		drv_data->tx_channel = imx_dma_request_by_prio("spi_imx_tx",
							DMA_PRIO_MEDIUM);
		if (drv_data->tx_channel < 0) {
			dev_err(dev,
				"probe - problem (%d) requesting tx channel\n",
				drv_data->tx_channel);
			status = drv_data->tx_channel;
			imx_dma_free(drv_data->rx_channel);
			goto err_no_txdma;
		} else
			imx_dma_setup_handlers(drv_data->tx_channel,
						dma_tx_handler,
						dma_err_handler,
						drv_data);

		/* Set request source and burst length for allocated channels */
		switch (drv_data->pdev->id) {
		case 1:
			/* Using SPI1 */
			RSSR(drv_data->rx_channel) = DMA_REQ_SPI1_R;
			RSSR(drv_data->tx_channel) = DMA_REQ_SPI1_T;
			break;
		case 2:
			/* Using SPI2 */
			RSSR(drv_data->rx_channel) = DMA_REQ_SPI2_R;
			RSSR(drv_data->tx_channel) = DMA_REQ_SPI2_T;
			break;
		default:
			dev_err(dev, "probe - bad SPI Id\n");
			imx_dma_free(drv_data->rx_channel);
			imx_dma_free(drv_data->tx_channel);
			status = -ENODEV;
			goto err_no_devid;
		}
		BLR(drv_data->rx_channel) = SPI_DMA_BLR;
		BLR(drv_data->tx_channel) = SPI_DMA_BLR;
	}

	/* Load default SPI configuration */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);
	writel(SPI_DEFAULT_CONTROL, drv_data->regs + SPI_CONTROL);

	/* Initialize and start queue */
	status = init_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem initializing queue\n");
		goto err_init_queue;
	}
	status = start_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem starting queue\n");
		goto err_start_queue;
	}

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem registering spi master\n");
		goto err_spi_register;
	}

	dev_dbg(dev, "probe succeeded\n");
	return 0;

err_init_queue:
err_start_queue:
err_spi_register:
	destroy_queue(drv_data);

err_no_rxdma:
err_no_txdma:
err_no_devid:
	free_irq(irq, drv_data);

err_no_irqres:
	iounmap(drv_data->regs);

err_no_iomap:
	release_resource(drv_data->ioarea);
	kfree(drv_data->ioarea);

err_no_iores:
	clk_disable(drv_data->clk);
	clk_put(drv_data->clk);

err_no_clk:
	spi_master_put(master);

err_no_mem:
err_no_pdata:
	return status;
}

static int __exit spi_imx_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int irq;
	int status = 0;

	if (!drv_data)
		return 0;

	tasklet_kill(&drv_data->pump_transfers);

	/* Remove the queue */
	status = destroy_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "queue remove failed (%d)\n", status);
		return status;
	}

	/* Reset SPI */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);

	/* Release DMA */
	if (drv_data->master_info->enable_dma) {
		RSSR(drv_data->rx_channel) = 0;
		RSSR(drv_data->tx_channel) = 0;
		imx_dma_free(drv_data->tx_channel);
		imx_dma_free(drv_data->rx_channel);
	}

	/* Release IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, drv_data);

	clk_disable(drv_data->clk);
	clk_put(drv_data->clk);

	/* Release map resources */
	iounmap(drv_data->regs);
	release_resource(drv_data->ioarea);
	kfree(drv_data->ioarea);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);
	spi_master_put(drv_data->master);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "remove succeeded\n");

	return 0;
}

static void spi_imx_shutdown(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);

	/* Reset SPI */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);

	dev_dbg(&pdev->dev, "shutdown succeeded\n");
}

#ifdef CONFIG_PM
static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	status = stop_queue(drv_data);
	if (status != 0) {
		dev_warn(&pdev->dev, "suspend cannot stop queue\n");
		return status;
	}

	dev_dbg(&pdev->dev, "suspended\n");

	return 0;
}

static int spi_imx_resume(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	/* Start the queue running */
	status = start_queue(drv_data);
	if (status != 0)
		dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
	else
		dev_dbg(&pdev->dev, "resumed\n");

	return status;
}
#else
#define spi_imx_suspend NULL
#define spi_imx_resume NULL
#endif /* CONFIG_PM */

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:spi_imx");

static struct platform_driver driver = {
	.driver = {
		.name = "spi_imx",
		.owner = THIS_MODULE,
	},
	.remove = __exit_p(spi_imx_remove),
	.shutdown = spi_imx_shutdown,
	.suspend = spi_imx_suspend,
	.resume = spi_imx_resume,
};

static int __init spi_imx_init(void)
{
	return platform_driver_probe(&driver, spi_imx_probe);
}
module_init(spi_imx_init);

static void __exit spi_imx_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(spi_imx_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("iMX SPI Controller Driver");
MODULE_LICENSE("GPL");