davinci_spi.c
/*
 * Copyright (C) 2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>

#include <mach/spi.h>
#include <mach/edma.h>
#define SPI_NO_RESOURCE		((resource_size_t)-1)

#define SPI_MAX_CHIPSELECT	2

#define CS_DEFAULT		0xFF

#define SPI_BUFSIZ		(SMP_CACHE_BYTES + 1)

#define DAVINCI_DMA_DATA_TYPE_S8	0x01
#define DAVINCI_DMA_DATA_TYPE_S16	0x02
#define DAVINCI_DMA_DATA_TYPE_S32	0x04

#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_PRESCALE_SHIFT	8

/* SPIPC0 */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

#define SPIINT_MASKALL		0x0101035F
#define SPI_INTLVL_1		0x000001FFu
#define SPI_INTLVL_0		0x00000000u

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK	BIT(12)

/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* SPIDELAY */
#define SPIDELAY_C2TDELAY_SHIFT	24
#define SPIDELAY_C2TDELAY_MASK	(0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT	16
#define SPIDELAY_T2CDELAY_MASK	(0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT	8
#define SPIDELAY_T2EDELAY_MASK	(0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT	0
#define SPIDELAY_C2EDELAY_MASK	0xFF

/* Error Masks */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_RX_INTR_MASK		BIT(8)
#define SPIFLG_TX_INTR_MASK		BIT(9)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)

#define SPIINT_BITERR_INTR	BIT(4)
#define SPIINT_OVRRUN_INTR	BIT(6)
#define SPIINT_RX_INTR		BIT(8)
#define SPIINT_TX_INTR		BIT(9)
#define SPIINT_DMA_REQ_EN	BIT(16)

/* SPI Controller registers */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50
struct davinci_spi_slave {
	u32	cmd_to_write;
	u32	clk_ctrl_to_write;
	u32	bytes_per_word;
	u8	active_cs;
};

/* We have 2 DMA channels per CS, one for RX and one for TX */
struct davinci_spi_dma {
	int			dma_tx_channel;
	int			dma_rx_channel;
	int			dma_tx_sync_dev;
	int			dma_rx_sync_dev;
	enum dma_event_q	eventq;

	struct completion	dma_tx_completion;
	struct completion	dma_rx_completion;
};

/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang	bitbang;
	struct clk		*clk;

	u8			version;
	resource_size_t		pbase;
	void __iomem		*base;
	size_t			region_size;
	int			irq;	/* signed: platform_get_irq() may fail */
	struct completion	done;

	const void		*tx;
	void			*rx;
	u8			*tmp_buf;
	int			count;
	struct davinci_spi_dma	*dma_channels;
	struct davinci_spi_platform_data *pdata;

	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
};
static struct davinci_spi_config davinci_spi_default_cfg;

static unsigned use_dma;

static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
{
	u8 *rx = davinci_spi->rx;

	*rx++ = (u8)data;
	davinci_spi->rx = rx;
}

static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
{
	u16 *rx = davinci_spi->rx;

	*rx++ = (u16)data;
	davinci_spi->rx = rx;
}

static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
{
	u32 data;
	const u8 *tx = davinci_spi->tx;

	data = *tx++;
	davinci_spi->tx = tx;
	return data;
}

static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
{
	u32 data;
	const u16 *tx = davinci_spi->tx;

	data = *tx++;
	davinci_spi->tx = tx;
	return data;
}

static inline void set_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v |= bits;
	iowrite32(v, addr);
}

static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v &= ~bits;
	iowrite32(v, addr);
}
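/*
 * Note: set_io_bits() and clear_io_bits() above are plain, unlocked
 * read-modify-write sequences.  That is sufficient here on the
 * assumption that the spi_bitbang framework serializes transfers on
 * this master, so no two paths update the same register concurrently.
 */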
static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
{
	struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);

	if (enable)
		set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
	else
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
}

/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
	struct davinci_spi *davinci_spi;
	struct davinci_spi_platform_data *pdata;
	u8 chip_sel = spi->chip_select;
	u16 spidat1_cfg = CS_DEFAULT;
	bool gpio_chipsel = false;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;

	if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
				pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
		gpio_chipsel = true;

	/*
	 * Board specific chip select logic decides the polarity and cs
	 * line for the controller
	 */
	if (gpio_chipsel) {
		if (value == BITBANG_CS_ACTIVE)
			gpio_set_value(pdata->chip_sel[chip_sel], 0);
		else
			gpio_set_value(pdata->chip_sel[chip_sel], 1);
	} else {
		if (value == BITBANG_CS_ACTIVE) {
			spidat1_cfg |= SPIDAT1_CSHOLD_MASK;
			spidat1_cfg &= ~(0x1 << chip_sel);
		}

		iowrite16(spidat1_cfg, davinci_spi->base + SPIDAT1 + 2);
	}
}
/**
 * davinci_spi_get_prescale - Calculates the correct prescale value
 * @davinci_spi: the controller's driver private data
 * @max_speed_hz: the maximum rate the SPI clock can run at
 *
 * This function calculates the prescale value that generates a clock rate
 * less than or equal to the specified maximum.
 *
 * Returns: calculated prescale - 1 for easy programming into SPI registers
 * or a negative error code if a valid prescaler cannot be obtained.
 */
static inline int davinci_spi_get_prescale(struct davinci_spi *davinci_spi,
					   u32 max_speed_hz)
{
	int ret;

	ret = DIV_ROUND_UP(clk_get_rate(davinci_spi->clk), max_speed_hz);

	if (ret < 3 || ret > 256)
		return -EINVAL;

	return ret - 1;
}
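/*
 * Worked example for davinci_spi_get_prescale() (illustrative only;
 * the 150 MHz functional clock below is an assumed figure, not a value
 * taken from any particular SoC):
 *
 *	clk_get_rate()            : 150,000,000 Hz
 *	max_speed_hz              :   2,000,000 Hz
 *	DIV_ROUND_UP(150M, 2M)    = 75   (within the valid 3..256 range)
 *	returned prescale value   = 74
 *
 * Programming 74 into the PRESCALE field of SPIFMTn yields
 * 150 MHz / (74 + 1) = 2 MHz, i.e. the fastest SPI clock that does not
 * exceed the requested maximum.
 */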
/**
 * davinci_spi_setup_transfer - This function determines the transfer method
 * @spi: spi device on which data transfer is to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function determines the data transfer method (8/16/32-bit transfer).
 * It also sets the SPI Clock Control register according to the
 * SPI slave device's frequency.
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	struct davinci_spi_config *spicfg;
	u8 bits_per_word = 0;
	u32 hz = 0, spifmt = 0;
	int prescale;

	davinci_spi = spi_master_get_devdata(spi->master);
	spicfg = (struct davinci_spi_config *)spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* if bits_per_word is not set then use the device default */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/*
	 * Assign function pointer to appropriate transfer method
	 * 8bit, 16bit or 32bit transfer
	 */
	if (bits_per_word <= 8 && bits_per_word >= 2) {
		davinci_spi->get_rx = davinci_spi_rx_buf_u8;
		davinci_spi->get_tx = davinci_spi_tx_buf_u8;
		davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
	} else if (bits_per_word <= 16 && bits_per_word >= 2) {
		davinci_spi->get_rx = davinci_spi_rx_buf_u16;
		davinci_spi->get_tx = davinci_spi_tx_buf_u16;
		davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
	} else
		return -EINVAL;

	if (!hz)
		hz = spi->max_speed_hz;

	/* Set up SPIFMTn register, unique to this chipselect. */

	/* prescale must be signed: davinci_spi_get_prescale() can fail */
	prescale = davinci_spi_get_prescale(davinci_spi, hz);
	if (prescale < 0)
		return prescale;

	spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);

	if (spi->mode & SPI_LSB_FIRST)
		spifmt |= SPIFMT_SHIFTDIR_MASK;

	if (spi->mode & SPI_CPOL)
		spifmt |= SPIFMT_POLARITY_MASK;

	if (!(spi->mode & SPI_CPHA))
		spifmt |= SPIFMT_PHASE_MASK;

	/*
	 * Version 1 hardware supports two basic SPI modes:
	 *  - Standard SPI mode uses 4 pins, with chipselect
	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
	 *	(distinct from SPI_3WIRE, with just one data wire;
	 *	or similar variants without MOSI or without MISO)
	 *
	 * Version 2 hardware supports an optional handshaking signal,
	 * so it can support two more modes:
	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
	 */
	if (davinci_spi->version == SPI_VERSION_2) {
		u32 delay = 0;

		spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
							& SPIFMT_WDELAY_MASK);

		if (spicfg->odd_parity)
			spifmt |= SPIFMT_ODD_PARITY_MASK;

		if (spicfg->parity_enable)
			spifmt |= SPIFMT_PARITYENA_MASK;

		if (spicfg->timer_disable) {
			spifmt |= SPIFMT_DISTIMER_MASK;
		} else {
			delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
						& SPIDELAY_C2TDELAY_MASK;
			delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
						& SPIDELAY_T2CDELAY_MASK;
		}

		if (spi->mode & SPI_READY) {
			spifmt |= SPIFMT_WAITENA_MASK;
			delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
						& SPIDELAY_T2EDELAY_MASK;
			delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
						& SPIDELAY_C2EDELAY_MASK;
		}

		iowrite32(delay, davinci_spi->base + SPIDELAY);
	}

	iowrite32(spifmt, davinci_spi->base + SPIFMT0);

	return 0;
}
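/*
 * A minimal sketch of how a board file might feed per-device settings
 * into davinci_spi_setup_transfer() via spi_board_info.controller_data.
 * The device name, bus/CS numbers, speed and delay values below are
 * illustrative assumptions, not taken from a real board:
 *
 *	static struct davinci_spi_config flash_spi_cfg = {
 *		.wdelay		= 0,
 *		.parity_enable	= 0,
 *		.timer_disable	= 0,
 *		.c2tdelay	= 8,
 *		.t2cdelay	= 8,
 *	};
 *
 *	static struct spi_board_info board_spi_info[] __initdata = {
 *		{
 *			.modalias	 = "m25p80",
 *			.max_speed_hz	 = 20000000,
 *			.bus_num	 = 0,
 *			.chip_select	 = 0,
 *			.mode		 = SPI_MODE_0,
 *			.controller_data = &flash_spi_cfg,
 *		},
 *	};
 *
 * Devices that register no controller_data fall back to
 * davinci_spi_default_cfg above.
 */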
static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
{
	struct spi_device *spi = (struct spi_device *)data;
	struct davinci_spi *davinci_spi;
	struct davinci_spi_dma *davinci_spi_dma;

	davinci_spi = spi_master_get_devdata(spi->master);
	davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);

	if (ch_status == DMA_COMPLETE)
		edma_stop(davinci_spi_dma->dma_rx_channel);
	else
		edma_clean_channel(davinci_spi_dma->dma_rx_channel);

	complete(&davinci_spi_dma->dma_rx_completion);
	/* We must disable the DMA RX request */
	davinci_spi_set_dma_req(spi, 0);
}

static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
{
	struct spi_device *spi = (struct spi_device *)data;
	struct davinci_spi *davinci_spi;
	struct davinci_spi_dma *davinci_spi_dma;

	davinci_spi = spi_master_get_devdata(spi->master);
	davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);

	if (ch_status == DMA_COMPLETE)
		edma_stop(davinci_spi_dma->dma_tx_channel);
	else
		edma_clean_channel(davinci_spi_dma->dma_tx_channel);

	complete(&davinci_spi_dma->dma_tx_completion);
	/* We must disable the DMA TX request */
	davinci_spi_set_dma_req(spi, 0);
}

static int davinci_spi_request_dma(struct spi_device *spi)
{
	struct davinci_spi *davinci_spi;
	struct davinci_spi_dma *davinci_spi_dma;
	struct device *sdev;
	int r;

	davinci_spi = spi_master_get_devdata(spi->master);
	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
	sdev = davinci_spi->bitbang.master->dev.parent;

	r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
				davinci_spi_dma_rx_callback, spi,
				davinci_spi_dma->eventq);
	if (r < 0) {
		dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
		return -EAGAIN;
	}
	davinci_spi_dma->dma_rx_channel = r;

	r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
				davinci_spi_dma_tx_callback, spi,
				davinci_spi_dma->eventq);
	if (r < 0) {
		edma_free_channel(davinci_spi_dma->dma_rx_channel);
		davinci_spi_dma->dma_rx_channel = -1;
		dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
		return -EAGAIN;
	}
	davinci_spi_dma->dma_tx_channel = r;

	return 0;
}
/**
 * davinci_spi_setup - This function sets the default transfer method
 * @spi: spi device on which data transfer is to be done
 *
 * This function sets the default transfer method.
 */
static int davinci_spi_setup(struct spi_device *spi)
{
	int retval;
	struct davinci_spi *davinci_spi;
	struct davinci_spi_dma *davinci_spi_dma;

	davinci_spi = spi_master_get_devdata(spi->master);

	/* if bits_per_word is zero then default it to 8 */
	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	davinci_spi->slave[spi->chip_select].cmd_to_write = 0;

	if (use_dma && davinci_spi->dma_channels) {
		davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

		if ((davinci_spi_dma->dma_rx_channel == -1)
				|| (davinci_spi_dma->dma_tx_channel == -1)) {
			retval = davinci_spi_request_dma(spi);
			if (retval < 0)
				return retval;
		}
	}

	retval = davinci_spi_setup_transfer(spi, NULL);

	return retval;
}

static void davinci_spi_cleanup(struct spi_device *spi)
{
	struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
	struct davinci_spi_dma *davinci_spi_dma;

	if (use_dma && davinci_spi->dma_channels) {
		davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

		if ((davinci_spi_dma->dma_rx_channel != -1)
				&& (davinci_spi_dma->dma_tx_channel != -1)) {
			edma_free_channel(davinci_spi_dma->dma_tx_channel);
			edma_free_channel(davinci_spi_dma->dma_rx_channel);
		}
	}
}
static int davinci_spi_bufs_prep(struct spi_device *spi,
		struct davinci_spi *davinci_spi)
{
	struct davinci_spi_platform_data *pdata;
	int op_mode = 0;

	/*
	 * REVISIT  unless devices disagree about SPI_LOOP or
	 * SPI_READY (SPI_NO_CS only allows one device!), this
	 * should not need to be done before each message...
	 * optimize for both flags staying cleared.
	 */

	op_mode = SPIPC0_DIFUN_MASK
		| SPIPC0_DOFUN_MASK
		| SPIPC0_CLKFUN_MASK;

	if (!(spi->mode & SPI_NO_CS)) {
		pdata = davinci_spi->pdata;
		if (!pdata->chip_sel ||
		     pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)
			op_mode |= 1 << spi->chip_select;
	}

	if (spi->mode & SPI_READY)
		op_mode |= SPIPC0_SPIENA_MASK;

	iowrite32(op_mode, davinci_spi->base + SPIPC0);

	if (spi->mode & SPI_LOOP)
		set_io_bits(davinci_spi->base + SPIGCR1,
				SPIGCR1_LOOPBACK_MASK);
	else
		clear_io_bits(davinci_spi->base + SPIGCR1,
				SPIGCR1_LOOPBACK_MASK);

	return 0;
}
static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
		int int_status)
{
	struct device *sdev = davinci_spi->bitbang.master->dev.parent;

	if (int_status & SPIFLG_TIMEOUT_MASK) {
		dev_dbg(sdev, "SPI Time-out Error\n");
		return -ETIMEDOUT;
	}
	if (int_status & SPIFLG_DESYNC_MASK) {
		dev_dbg(sdev, "SPI Desynchronization Error\n");
		return -EIO;
	}
	if (int_status & SPIFLG_BITERR_MASK) {
		dev_dbg(sdev, "SPI Bit error\n");
		return -EIO;
	}

	if (davinci_spi->version == SPI_VERSION_2) {
		if (int_status & SPIFLG_DLEN_ERR_MASK) {
			dev_dbg(sdev, "SPI Data Length Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_PARERR_MASK) {
			dev_dbg(sdev, "SPI Parity Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_OVRRUN_MASK) {
			dev_dbg(sdev, "SPI Data Overrun error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_TX_INTR_MASK) {
			dev_dbg(sdev, "SPI TX intr bit set\n");
			return -EIO;
		}
		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
			dev_dbg(sdev, "SPI Buffer Init Active\n");
			return -EBUSY;
		}
	}

	return 0;
}
/**
 * davinci_spi_bufs_pio - PIO transfer handler
 * @spi: spi device on which data transfer is to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function puts the data to be transferred into the data register
 * of the SPI controller and then waits until the completion is signalled
 * by the IRQ handler.
 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status, count, ret;
	u8 conv;
	u32 tx_data, data1_reg_val;
	u32 buf_val, flg_val;
	struct davinci_spi_platform_data *pdata;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);

	INIT_COMPLETION(davinci_spi->done);

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	count = davinci_spi->count;

	/* Determine the command to execute READ or WRITE */
	if (t->tx_buf) {
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

		while (1) {
			tx_data = davinci_spi->get_tx(davinci_spi);

			data1_reg_val &= ~(0xFFFF);
			data1_reg_val |= (0xFFFF & tx_data);

			buf_val = ioread32(davinci_spi->base + SPIBUF);
			if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				count--;
			}
			while (ioread32(davinci_spi->base + SPIBUF)
					& SPIBUF_RXEMPTY_MASK)
				cpu_relax();

			/* getting the returned byte */
			if (t->rx_buf) {
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
			}
			if (count <= 0)
				break;
		}
	} else {
		if (pdata->poll_mode) {
			while (1) {
				/* keeps the serial clock going */
				if ((ioread32(davinci_spi->base + SPIBUF)
						& SPIBUF_TXFULL_MASK) == 0)
					iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				while (ioread32(davinci_spi->base + SPIBUF) &
						SPIBUF_RXEMPTY_MASK)
					cpu_relax();

				flg_val = ioread32(davinci_spi->base + SPIFLG);
				buf_val = ioread32(davinci_spi->base + SPIBUF);

				davinci_spi->get_rx(buf_val, davinci_spi);

				count--;
				if (count <= 0)
					break;
			}
		} else {	/* Receive in Interrupt mode */
			int i;

			for (i = 0; i < davinci_spi->count; i++) {
				set_io_bits(davinci_spi->base + SPIINT,
						SPIINT_BITERR_INTR
						| SPIINT_OVRRUN_INTR
						| SPIINT_RX_INTR);

				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				while (ioread32(davinci_spi->base + SPIINT) &
						SPIINT_RX_INTR)
					cpu_relax();
			}
			iowrite32((data1_reg_val & 0x0ffcffff),
					davinci_spi->base + SPIDAT1);
		}
	}

	/*
	 * Check for bit error, desync error, parity error, timeout error
	 * and receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status = 0;
	int count, temp_count;
	u8 conv = 1;
	u32 data1_reg_val;
	struct davinci_spi_dma *davinci_spi_dma;
	int word_len, data_type, ret;
	unsigned long tx_reg, rx_reg;
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	sdev = davinci_spi->bitbang.master->dev.parent;

	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);

	INIT_COMPLETION(davinci_spi->done);

	init_completion(&davinci_spi_dma->dma_rx_completion);
	init_completion(&davinci_spi_dma->dma_tx_completion);

	word_len = conv * 8;

	if (word_len <= 8)
		data_type = DAVINCI_DMA_DATA_TYPE_S8;
	else if (word_len <= 16)
		data_type = DAVINCI_DMA_DATA_TYPE_S16;
	else if (word_len <= 32)
		data_type = DAVINCI_DMA_DATA_TYPE_S32;
	else
		return -EINVAL;

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	count = davinci_spi->count;	/* the number of elements */

	/* disable all interrupts for dma transfers */
	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
	/* Disable SPI to write configuration bits in SPIDAT */
	clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	if (t->tx_buf) {
		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count;
	} else {
		/* We need TX clocking for RX transaction */
		t->tx_dma = dma_map_single(&spi->dev,
				(void *)davinci_spi->tmp_buf, count + 1,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX tmp buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count + 1;
	}
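	/*
	 * Note on units: 'count' is a word count (t->len / conv), yet it is
	 * also passed to dma_map_single() as a length in bytes, and
	 * 'temp_count' doubles as both the EDMA element count and the later
	 * dma_unmap_single() length.  The two interpretations only coincide
	 * for conv == 1 (8-bit words); wider words would need the mapping
	 * length scaled by conv.
	 */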
	edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
			data_type, temp_count, 1, 0, ASYNC);
	edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
	edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
	edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
	edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);

	if (t->rx_buf) {
		/* initiate transaction */
		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

		t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
					count);
			if (t->tx_buf != NULL)
				dma_unmap_single(NULL, t->tx_dma,
						count, DMA_TO_DEVICE);
			return -ENOMEM;
		}
		edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
				data_type, count, 1, 0, ASYNC);
		edma_set_src(davinci_spi_dma->dma_rx_channel,
				rx_reg, INCR, W8BIT);
		edma_set_dest(davinci_spi_dma->dma_rx_channel,
				t->rx_dma, INCR, W8BIT);
		edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
		edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
				data_type, 0);
	}

	if ((t->tx_buf) || (t->rx_buf))
		edma_start(davinci_spi_dma->dma_tx_channel);

	if (t->rx_buf)
		edma_start(davinci_spi_dma->dma_rx_channel);

	if ((t->rx_buf) || (t->tx_buf))
		davinci_spi_set_dma_req(spi, 1);

	if (t->tx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_tx_completion);

	if (t->rx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_rx_completion);

	dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);

	if (t->rx_buf)
		dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);

	/*
	 * Check for bit error, desync error, parity error, timeout error
	 * and receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
/**
 * davinci_spi_irq - IRQ handler for DaVinci SPI
 * @irq: IRQ number for this SPI Master
 * @context_data: structure for SPI Master controller davinci_spi
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
{
	struct davinci_spi *davinci_spi = context_data;
	u32 int_status, rx_data = 0;
	irqreturn_t ret = IRQ_NONE;

	int_status = ioread32(davinci_spi->base + SPIFLG);

	/*
	 * Drain received words while the RX flag stays set.  Error flags
	 * are checked separately by the transfer routines via
	 * davinci_spi_check_error().
	 */
	while ((int_status & SPIFLG_RX_INTR_MASK)) {
		ret = IRQ_HANDLED;

		rx_data = ioread32(davinci_spi->base + SPIBUF);
		davinci_spi->get_rx(rx_data, davinci_spi);

		/* Disable Receive Interrupt */
		iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
				davinci_spi->base + SPIINT);

		int_status = ioread32(davinci_spi->base + SPIFLG);
	}

	return ret;
}
/**
 * davinci_spi_probe - probe function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 */
static int davinci_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct davinci_spi *davinci_spi;
	struct davinci_spi_platform_data *pdata;
	struct resource *r, *mem;
	resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
	resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
	resource_size_t dma_eventq = SPI_NO_RESOURCE;
	int i = 0, ret = 0;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		ret = -ENODEV;
		goto err;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
	if (master == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	dev_set_drvdata(&pdev->dev, master);

	davinci_spi = spi_master_get_devdata(master);
	if (davinci_spi == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	davinci_spi->pbase = r->start;
	davinci_spi->region_size = resource_size(r);
	davinci_spi->pdata = pdata;

	mem = request_mem_region(r->start, davinci_spi->region_size,
					pdev->name);
	if (mem == NULL) {
		ret = -EBUSY;
		goto free_master;
	}

	davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
	if (davinci_spi->base == NULL) {
		ret = -ENOMEM;
		goto release_region;
	}

	davinci_spi->irq = platform_get_irq(pdev, 0);
	if (davinci_spi->irq <= 0) {
		ret = -EINVAL;
		goto unmap_io;
	}

	ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
			  dev_name(&pdev->dev), davinci_spi);
	if (ret)
		goto unmap_io;

	/* Allocate tmp_buf for tx_buf */
	davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (davinci_spi->tmp_buf == NULL) {
		ret = -ENOMEM;
		goto irq_free;
	}

	davinci_spi->bitbang.master = spi_master_get(master);
	if (davinci_spi->bitbang.master == NULL) {
		ret = -ENODEV;
		goto free_tmp_buf;
	}

	davinci_spi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(davinci_spi->clk)) {
		ret = -ENODEV;
		goto put_master;
	}
	clk_enable(davinci_spi->clk);

	master->bus_num = pdev->id;
	master->num_chipselect = pdata->num_chipselect;
	master->setup = davinci_spi_setup;
	master->cleanup = davinci_spi_cleanup;

	davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
	davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;

	davinci_spi->version = pdata->version;
	use_dma = pdata->use_dma;

	davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
	if (davinci_spi->version == SPI_VERSION_2)
		davinci_spi->bitbang.flags |= SPI_READY;

	if (use_dma) {
		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (r)
			dma_rx_chan = r->start;
		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
		if (r)
			dma_tx_chan = r->start;
		r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
		if (r)
			dma_eventq = r->start;
	}

	if (!use_dma ||
	    dma_rx_chan == SPI_NO_RESOURCE ||
	    dma_tx_chan == SPI_NO_RESOURCE ||
	    dma_eventq == SPI_NO_RESOURCE) {
		davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
		use_dma = 0;
	} else {
		davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;

		davinci_spi->dma_channels = kzalloc(master->num_chipselect
				* sizeof(struct davinci_spi_dma), GFP_KERNEL);
		if (davinci_spi->dma_channels == NULL) {
			ret = -ENOMEM;
			goto free_clk;
		}

		for (i = 0; i < master->num_chipselect; i++) {
			davinci_spi->dma_channels[i].dma_rx_channel = -1;
			davinci_spi->dma_channels[i].dma_rx_sync_dev =
				dma_rx_chan;
			davinci_spi->dma_channels[i].dma_tx_channel = -1;
			davinci_spi->dma_channels[i].dma_tx_sync_dev =
				dma_tx_chan;
			davinci_spi->dma_channels[i].eventq = dma_eventq;
		}
		dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
				"Using RX channel = %d, TX channel = %d and "
				"event queue = %d\n", (int)dma_rx_chan,
				(int)dma_tx_chan, (int)dma_eventq);
	}

	davinci_spi->get_rx = davinci_spi_rx_buf_u8;
	davinci_spi->get_tx = davinci_spi_tx_buf_u8;

	init_completion(&davinci_spi->done);

	/* Reset the In/Out SPI module */
	iowrite32(0, davinci_spi->base + SPIGCR0);
	udelay(100);
	iowrite32(1, davinci_spi->base + SPIGCR0);

	/* initialize chip selects */
	if (pdata->chip_sel) {
		for (i = 0; i < pdata->num_chipselect; i++) {
			if (pdata->chip_sel[i] != SPI_INTERN_CS)
				gpio_direction_output(pdata->chip_sel[i], 1);
		}
	}

	/* Clock internal */
	if (davinci_spi->pdata->clk_internal)
		set_io_bits(davinci_spi->base + SPIGCR1,
				SPIGCR1_CLKMOD_MASK);
	else
		clear_io_bits(davinci_spi->base + SPIGCR1,
				SPIGCR1_CLKMOD_MASK);

	iowrite32(CS_DEFAULT, davinci_spi->base + SPIDEF);

	/* master mode default */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);

	if (davinci_spi->pdata->intr_level)
		iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
	else
		iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);

	ret = spi_bitbang_start(&davinci_spi->bitbang);
	if (ret)
		goto free_clk;

	dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);

	if (!pdata->poll_mode)
		dev_info(&pdev->dev, "Operating in interrupt mode"
			" using IRQ %d\n", davinci_spi->irq);

	return ret;

free_clk:
	clk_disable(davinci_spi->clk);
	clk_put(davinci_spi->clk);
put_master:
	spi_master_put(master);
free_tmp_buf:
	kfree(davinci_spi->tmp_buf);
irq_free:
	free_irq(davinci_spi->irq, davinci_spi);
unmap_io:
	iounmap(davinci_spi->base);
release_region:
	release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
free_master:
	kfree(master);
err:
	return ret;
}
/**
 * davinci_spi_remove - remove function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * This function does the reverse of davinci_spi_probe(): it frees the IRQ
 * and the SPI controller's memory region, and calls spi_bitbang_stop()
 * to destroy the work queue that was created by spi_bitbang_start().
 */
static int __exit davinci_spi_remove(struct platform_device *pdev)
{
	struct davinci_spi *davinci_spi;
	struct spi_master *master;

	master = dev_get_drvdata(&pdev->dev);
	davinci_spi = spi_master_get_devdata(master);

	spi_bitbang_stop(&davinci_spi->bitbang);

	clk_disable(davinci_spi->clk);
	clk_put(davinci_spi->clk);
	spi_master_put(master);
	kfree(davinci_spi->tmp_buf);
	free_irq(davinci_spi->irq, davinci_spi);
	iounmap(davinci_spi->base);
	release_mem_region(davinci_spi->pbase, davinci_spi->region_size);

	return 0;
}
static struct platform_driver davinci_spi_driver = {
	.driver.name = "spi_davinci",
	.remove = __exit_p(davinci_spi_remove),
};

static int __init davinci_spi_init(void)
{
	return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
}
module_init(davinci_spi_init);

static void __exit davinci_spi_exit(void)
{
	platform_driver_unregister(&davinci_spi_driver);
}
module_exit(davinci_spi_exit);

MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");
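/*
 * A minimal sketch of the board-side setup this driver expects; with
 * .use_dma = 0 the driver falls back to PIO.  The register base, IRQ
 * number and field values below are placeholders for illustration,
 * not real SoC data:
 *
 *	static struct davinci_spi_platform_data board_spi_pdata = {
 *		.version	= SPI_VERSION_1,
 *		.num_chipselect	= 2,
 *		.clk_internal	= 1,
 *		.use_dma	= 0,
 *	};
 *
 *	static struct resource board_spi_resources[] = {
 *		{
 *			.start	= 0x01000000,
 *			.end	= 0x010007ff,
 *			.flags	= IORESOURCE_MEM,
 *		}, {
 *			.start	= 20,
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct platform_device board_spi_device = {
 *		.name		= "spi_davinci",
 *		.id		= 0,
 *		.resource	= board_spi_resources,
 *		.num_resources	= ARRAY_SIZE(board_spi_resources),
 *		.dev		= {
 *			.platform_data	= &board_spi_pdata,
 *		},
 *	};
 *
 * When .use_dma is set, probe() additionally expects three
 * IORESOURCE_DMA entries: the RX channel, the TX channel and the EDMA
 * event queue, in that order.
 */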