/* spi-mxs.c */
  1. /*
  2. * Freescale MXS SPI master driver
  3. *
  4. * Copyright 2012 DENX Software Engineering, GmbH.
  5. * Copyright 2012 Freescale Semiconductor, Inc.
  6. * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
  7. *
  8. * Rework and transition to new API by:
  9. * Marek Vasut <marex@denx.de>
  10. *
  11. * Based on previous attempt by:
  12. * Fabio Estevam <fabio.estevam@freescale.com>
  13. *
  14. * Based on code from U-Boot bootloader by:
  15. * Marek Vasut <marex@denx.de>
  16. *
  17. * Based on spi-stmp.c, which is:
  18. * Author: Dmitry Pervushin <dimka@embeddedalley.com>
  19. *
  20. * This program is free software; you can redistribute it and/or modify
  21. * it under the terms of the GNU General Public License as published by
  22. * the Free Software Foundation; either version 2 of the License, or
  23. * (at your option) any later version.
  24. *
  25. * This program is distributed in the hope that it will be useful,
  26. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  27. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  28. * GNU General Public License for more details.
  29. */
  30. #include <linux/kernel.h>
  31. #include <linux/init.h>
  32. #include <linux/ioport.h>
  33. #include <linux/of.h>
  34. #include <linux/of_device.h>
  35. #include <linux/of_gpio.h>
  36. #include <linux/platform_device.h>
  37. #include <linux/delay.h>
  38. #include <linux/interrupt.h>
  39. #include <linux/dma-mapping.h>
  40. #include <linux/dmaengine.h>
  41. #include <linux/highmem.h>
  42. #include <linux/clk.h>
  43. #include <linux/err.h>
  44. #include <linux/completion.h>
  45. #include <linux/gpio.h>
  46. #include <linux/regulator/consumer.h>
  47. #include <linux/module.h>
  48. #include <linux/pinctrl/consumer.h>
  49. #include <linux/stmp_device.h>
  50. #include <linux/spi/spi.h>
  51. #include <linux/spi/mxs-spi.h>
/* Name used for the platform driver and the error-IRQ registration. */
#define DRIVER_NAME "mxs-spi"
/* Use 10S timeout for very long transfers, it should suffice. */
#define SSP_TIMEOUT 10000
/* Max bytes per DMA scatterlist entry for non-vmalloc buffers. */
#define SG_MAXLEN 0xff00
/* Per-controller driver state, stored as spi_master drvdata. */
struct mxs_spi {
	struct mxs_ssp ssp;	/* shared MXS SSP block state (base, clk, DMA channel) */
	struct completion c;	/* completed by the DMA callback at end of transfer */
};
  60. static int mxs_spi_setup_transfer(struct spi_device *dev,
  61. struct spi_transfer *t)
  62. {
  63. struct mxs_spi *spi = spi_master_get_devdata(dev->master);
  64. struct mxs_ssp *ssp = &spi->ssp;
  65. uint8_t bits_per_word;
  66. uint32_t hz = 0;
  67. bits_per_word = dev->bits_per_word;
  68. if (t && t->bits_per_word)
  69. bits_per_word = t->bits_per_word;
  70. if (bits_per_word != 8) {
  71. dev_err(&dev->dev, "%s, unsupported bits_per_word=%d\n",
  72. __func__, bits_per_word);
  73. return -EINVAL;
  74. }
  75. hz = dev->max_speed_hz;
  76. if (t && t->speed_hz)
  77. hz = min(hz, t->speed_hz);
  78. if (hz == 0) {
  79. dev_err(&dev->dev, "Cannot continue with zero clock\n");
  80. return -EINVAL;
  81. }
  82. mxs_ssp_set_clk_rate(ssp, hz);
  83. writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
  84. BF_SSP_CTRL1_WORD_LENGTH
  85. (BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
  86. ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
  87. ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
  88. ssp->base + HW_SSP_CTRL1(ssp));
  89. writel(0x0, ssp->base + HW_SSP_CMD0);
  90. writel(0x0, ssp->base + HW_SSP_CMD1);
  91. return 0;
  92. }
  93. static int mxs_spi_setup(struct spi_device *dev)
  94. {
  95. int err = 0;
  96. if (!dev->bits_per_word)
  97. dev->bits_per_word = 8;
  98. if (dev->mode & ~(SPI_CPOL | SPI_CPHA))
  99. return -EINVAL;
  100. err = mxs_spi_setup_transfer(dev, NULL);
  101. if (err) {
  102. dev_err(&dev->dev,
  103. "Failed to setup transfer, error = %d\n", err);
  104. }
  105. return err;
  106. }
  107. static uint32_t mxs_spi_cs_to_reg(unsigned cs)
  108. {
  109. uint32_t select = 0;
  110. /*
  111. * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
  112. *
  113. * The bits BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ
  114. * in HW_SSP_CTRL0 register do have multiple usage, please refer to
  115. * the datasheet for further details. In SPI mode, they are used to
  116. * toggle the chip-select lines (nCS pins).
  117. */
  118. if (cs & 1)
  119. select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
  120. if (cs & 2)
  121. select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;
  122. return select;
  123. }
  124. static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs)
  125. {
  126. const uint32_t mask =
  127. BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ;
  128. uint32_t select;
  129. struct mxs_ssp *ssp = &spi->ssp;
  130. writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
  131. select = mxs_spi_cs_to_reg(cs);
  132. writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
  133. }
/*
 * Begin a CS-held sequence: set LOCK_CS and clear IGNORE_CRC in CTRL0.
 * NOTE(review): per the bit names these presumably keep the chip-select
 * asserted across the following transfers — confirm against the SSP
 * section of the i.MX23/28 reference manual.
 */
static inline void mxs_spi_enable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
}
/*
 * End a CS-held sequence: exact inverse of mxs_spi_enable() —
 * clear LOCK_CS and set IGNORE_CRC so the chip-select is released
 * when the transfer completes.
 */
static inline void mxs_spi_disable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}
  150. static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
  151. {
  152. unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
  153. struct mxs_ssp *ssp = &spi->ssp;
  154. uint32_t reg;
  155. while (1) {
  156. reg = readl_relaxed(ssp->base + offset);
  157. if (set && ((reg & mask) == mask))
  158. break;
  159. if (!set && ((~reg & mask) == mask))
  160. break;
  161. udelay(1);
  162. if (time_after(jiffies, timeout))
  163. return -ETIMEDOUT;
  164. }
  165. return 0;
  166. }
/*
 * DMA completion callback (runs from the dmaengine tasklet/IRQ path):
 * wake the thread sleeping in mxs_spi_txrx_dma().
 */
static void mxs_ssp_dma_irq_callback(void *param)
{
	struct mxs_spi *spi = param;

	complete(&spi->c);
}
/*
 * SSP error interrupt handler: errors are not expected in normal
 * operation, so just dump the controller state for diagnosis.
 *
 * NOTE(review): the handler never acknowledges/clears the error status
 * in hardware, so a persistent error condition could keep re-firing —
 * verify against the SSP interrupt semantics.
 */
static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
	struct mxs_ssp *ssp = dev_id;

	dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
		__func__, __LINE__,
		readl(ssp->base + HW_SSP_CTRL1(ssp)),
		readl(ssp->base + HW_SSP_STATUS(ssp)));

	return IRQ_HANDLED;
}
  181. static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
  182. unsigned char *buf, int len,
  183. int *first, int *last, int write)
  184. {
  185. struct mxs_ssp *ssp = &spi->ssp;
  186. struct dma_async_tx_descriptor *desc = NULL;
  187. const bool vmalloced_buf = is_vmalloc_addr(buf);
  188. const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
  189. const int sgs = DIV_ROUND_UP(len, desc_len);
  190. int sg_count;
  191. int min, ret;
  192. uint32_t ctrl0;
  193. struct page *vm_page;
  194. void *sg_buf;
  195. struct {
  196. uint32_t pio[4];
  197. struct scatterlist sg;
  198. } *dma_xfer;
  199. if (!len)
  200. return -EINVAL;
  201. dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL);
  202. if (!dma_xfer)
  203. return -ENOMEM;
  204. INIT_COMPLETION(spi->c);
  205. ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
  206. ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);
  207. if (*first)
  208. ctrl0 |= BM_SSP_CTRL0_LOCK_CS;
  209. if (!write)
  210. ctrl0 |= BM_SSP_CTRL0_READ;
  211. /* Queue the DMA data transfer. */
  212. for (sg_count = 0; sg_count < sgs; sg_count++) {
  213. min = min(len, desc_len);
  214. /* Prepare the transfer descriptor. */
  215. if ((sg_count + 1 == sgs) && *last)
  216. ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
  217. if (ssp->devid == IMX23_SSP)
  218. ctrl0 |= min;
  219. dma_xfer[sg_count].pio[0] = ctrl0;
  220. dma_xfer[sg_count].pio[3] = min;
  221. if (vmalloced_buf) {
  222. vm_page = vmalloc_to_page(buf);
  223. if (!vm_page) {
  224. ret = -ENOMEM;
  225. goto err_vmalloc;
  226. }
  227. sg_buf = page_address(vm_page) +
  228. ((size_t)buf & ~PAGE_MASK);
  229. } else {
  230. sg_buf = buf;
  231. }
  232. sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
  233. ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
  234. write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  235. len -= min;
  236. buf += min;
  237. /* Queue the PIO register write transfer. */
  238. desc = dmaengine_prep_slave_sg(ssp->dmach,
  239. (struct scatterlist *)dma_xfer[sg_count].pio,
  240. (ssp->devid == IMX23_SSP) ? 1 : 4,
  241. DMA_TRANS_NONE,
  242. sg_count ? DMA_PREP_INTERRUPT : 0);
  243. if (!desc) {
  244. dev_err(ssp->dev,
  245. "Failed to get PIO reg. write descriptor.\n");
  246. ret = -EINVAL;
  247. goto err_mapped;
  248. }
  249. desc = dmaengine_prep_slave_sg(ssp->dmach,
  250. &dma_xfer[sg_count].sg, 1,
  251. write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
  252. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  253. if (!desc) {
  254. dev_err(ssp->dev,
  255. "Failed to get DMA data write descriptor.\n");
  256. ret = -EINVAL;
  257. goto err_mapped;
  258. }
  259. }
  260. /*
  261. * The last descriptor must have this callback,
  262. * to finish the DMA transaction.
  263. */
  264. desc->callback = mxs_ssp_dma_irq_callback;
  265. desc->callback_param = spi;
  266. /* Start the transfer. */
  267. dmaengine_submit(desc);
  268. dma_async_issue_pending(ssp->dmach);
  269. ret = wait_for_completion_timeout(&spi->c,
  270. msecs_to_jiffies(SSP_TIMEOUT));
  271. if (!ret) {
  272. dev_err(ssp->dev, "DMA transfer timeout\n");
  273. ret = -ETIMEDOUT;
  274. goto err_vmalloc;
  275. }
  276. ret = 0;
  277. err_vmalloc:
  278. while (--sg_count >= 0) {
  279. err_mapped:
  280. dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
  281. write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  282. }
  283. kfree(dma_xfer);
  284. return ret;
  285. }
  286. static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
  287. unsigned char *buf, int len,
  288. int *first, int *last, int write)
  289. {
  290. struct mxs_ssp *ssp = &spi->ssp;
  291. if (*first)
  292. mxs_spi_enable(spi);
  293. mxs_spi_set_cs(spi, cs);
  294. while (len--) {
  295. if (*last && len == 0)
  296. mxs_spi_disable(spi);
  297. if (ssp->devid == IMX23_SSP) {
  298. writel(BM_SSP_CTRL0_XFER_COUNT,
  299. ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
  300. writel(1,
  301. ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
  302. } else {
  303. writel(1, ssp->base + HW_SSP_XFER_SIZE);
  304. }
  305. if (write)
  306. writel(BM_SSP_CTRL0_READ,
  307. ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
  308. else
  309. writel(BM_SSP_CTRL0_READ,
  310. ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
  311. writel(BM_SSP_CTRL0_RUN,
  312. ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
  313. if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
  314. return -ETIMEDOUT;
  315. if (write)
  316. writel(*buf, ssp->base + HW_SSP_DATA(ssp));
  317. writel(BM_SSP_CTRL0_DATA_XFER,
  318. ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
  319. if (!write) {
  320. if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
  321. BM_SSP_STATUS_FIFO_EMPTY, 0))
  322. return -ETIMEDOUT;
  323. *buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
  324. }
  325. if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
  326. return -ETIMEDOUT;
  327. buf++;
  328. }
  329. if (len <= 0)
  330. return 0;
  331. return -ETIMEDOUT;
  332. }
  333. static int mxs_spi_transfer_one(struct spi_master *master,
  334. struct spi_message *m)
  335. {
  336. struct mxs_spi *spi = spi_master_get_devdata(master);
  337. struct mxs_ssp *ssp = &spi->ssp;
  338. int first, last;
  339. struct spi_transfer *t, *tmp_t;
  340. int status = 0;
  341. int cs;
  342. first = last = 0;
  343. cs = m->spi->chip_select;
  344. list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
  345. status = mxs_spi_setup_transfer(m->spi, t);
  346. if (status)
  347. break;
  348. if (&t->transfer_list == m->transfers.next)
  349. first = 1;
  350. if (&t->transfer_list == m->transfers.prev)
  351. last = 1;
  352. if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) {
  353. dev_err(ssp->dev,
  354. "Cannot send and receive simultaneously\n");
  355. status = -EINVAL;
  356. break;
  357. }
  358. /*
  359. * Small blocks can be transfered via PIO.
  360. * Measured by empiric means:
  361. *
  362. * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
  363. *
  364. * DMA only: 2.164808 seconds, 473.0KB/s
  365. * Combined: 1.676276 seconds, 610.9KB/s
  366. */
  367. if (t->len <= 256) {
  368. writel(BM_SSP_CTRL1_DMA_ENABLE,
  369. ssp->base + HW_SSP_CTRL1(ssp) +
  370. STMP_OFFSET_REG_CLR);
  371. if (t->tx_buf)
  372. status = mxs_spi_txrx_pio(spi, cs,
  373. (void *)t->tx_buf,
  374. t->len, &first, &last, 1);
  375. if (t->rx_buf)
  376. status = mxs_spi_txrx_pio(spi, cs,
  377. t->rx_buf, t->len,
  378. &first, &last, 0);
  379. } else {
  380. writel(BM_SSP_CTRL1_DMA_ENABLE,
  381. ssp->base + HW_SSP_CTRL1(ssp) +
  382. STMP_OFFSET_REG_SET);
  383. if (t->tx_buf)
  384. status = mxs_spi_txrx_dma(spi, cs,
  385. (void *)t->tx_buf, t->len,
  386. &first, &last, 1);
  387. if (t->rx_buf)
  388. status = mxs_spi_txrx_dma(spi, cs,
  389. t->rx_buf, t->len,
  390. &first, &last, 0);
  391. }
  392. m->actual_length += t->len;
  393. if (status) {
  394. stmp_reset_block(ssp->base);
  395. break;
  396. }
  397. first = last = 0;
  398. }
  399. m->status = 0;
  400. spi_finalize_current_message(master);
  401. return status;
  402. }
  403. static bool mxs_ssp_dma_filter(struct dma_chan *chan, void *param)
  404. {
  405. struct mxs_ssp *ssp = param;
  406. if (!mxs_dma_is_apbh(chan))
  407. return false;
  408. if (chan->chan_id != ssp->dma_channel)
  409. return false;
  410. chan->private = &ssp->dma_data;
  411. return true;
  412. }
/* Device-tree match table; .data carries the SSP block variant id. */
static const struct of_device_id mxs_spi_dt_ids[] = {
	{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
  419. static int __devinit mxs_spi_probe(struct platform_device *pdev)
  420. {
  421. const struct of_device_id *of_id =
  422. of_match_device(mxs_spi_dt_ids, &pdev->dev);
  423. struct device_node *np = pdev->dev.of_node;
  424. struct spi_master *master;
  425. struct mxs_spi *spi;
  426. struct mxs_ssp *ssp;
  427. struct resource *iores, *dmares;
  428. struct pinctrl *pinctrl;
  429. struct clk *clk;
  430. void __iomem *base;
  431. int devid, dma_channel;
  432. int ret = 0, irq_err, irq_dma;
  433. dma_cap_mask_t mask;
  434. iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  435. irq_err = platform_get_irq(pdev, 0);
  436. irq_dma = platform_get_irq(pdev, 1);
  437. if (!iores || irq_err < 0 || irq_dma < 0)
  438. return -EINVAL;
  439. base = devm_request_and_ioremap(&pdev->dev, iores);
  440. if (!base)
  441. return -EADDRNOTAVAIL;
  442. pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
  443. if (IS_ERR(pinctrl))
  444. return PTR_ERR(pinctrl);
  445. clk = devm_clk_get(&pdev->dev, NULL);
  446. if (IS_ERR(clk))
  447. return PTR_ERR(clk);
  448. if (np) {
  449. devid = (enum mxs_ssp_id) of_id->data;
  450. /*
  451. * TODO: This is a temporary solution and should be changed
  452. * to use generic DMA binding later when the helpers get in.
  453. */
  454. ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
  455. &dma_channel);
  456. if (ret) {
  457. dev_err(&pdev->dev,
  458. "Failed to get DMA channel\n");
  459. return -EINVAL;
  460. }
  461. } else {
  462. dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
  463. if (!dmares)
  464. return -EINVAL;
  465. devid = pdev->id_entry->driver_data;
  466. dma_channel = dmares->start;
  467. }
  468. master = spi_alloc_master(&pdev->dev, sizeof(*spi));
  469. if (!master)
  470. return -ENOMEM;
  471. master->transfer_one_message = mxs_spi_transfer_one;
  472. master->setup = mxs_spi_setup;
  473. master->mode_bits = SPI_CPOL | SPI_CPHA;
  474. master->num_chipselect = 3;
  475. master->dev.of_node = np;
  476. master->flags = SPI_MASTER_HALF_DUPLEX;
  477. spi = spi_master_get_devdata(master);
  478. ssp = &spi->ssp;
  479. ssp->dev = &pdev->dev;
  480. ssp->clk = clk;
  481. ssp->base = base;
  482. ssp->devid = devid;
  483. ssp->dma_channel = dma_channel;
  484. init_completion(&spi->c);
  485. ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
  486. DRIVER_NAME, ssp);
  487. if (ret)
  488. goto out_master_free;
  489. dma_cap_zero(mask);
  490. dma_cap_set(DMA_SLAVE, mask);
  491. ssp->dma_data.chan_irq = irq_dma;
  492. ssp->dmach = dma_request_channel(mask, mxs_ssp_dma_filter, ssp);
  493. if (!ssp->dmach) {
  494. dev_err(ssp->dev, "Failed to request DMA\n");
  495. goto out_master_free;
  496. }
  497. /*
  498. * Crank up the clock to 120MHz, this will be further divided onto a
  499. * proper speed.
  500. */
  501. clk_prepare_enable(ssp->clk);
  502. clk_set_rate(ssp->clk, 120 * 1000 * 1000);
  503. ssp->clk_rate = clk_get_rate(ssp->clk) / 1000;
  504. stmp_reset_block(ssp->base);
  505. platform_set_drvdata(pdev, master);
  506. ret = spi_register_master(master);
  507. if (ret) {
  508. dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
  509. goto out_free_dma;
  510. }
  511. return 0;
  512. out_free_dma:
  513. dma_release_channel(ssp->dmach);
  514. clk_disable_unprepare(ssp->clk);
  515. out_master_free:
  516. spi_master_put(master);
  517. return ret;
  518. }
  519. static int __devexit mxs_spi_remove(struct platform_device *pdev)
  520. {
  521. struct spi_master *master;
  522. struct mxs_spi *spi;
  523. struct mxs_ssp *ssp;
  524. master = spi_master_get(platform_get_drvdata(pdev));
  525. spi = spi_master_get_devdata(master);
  526. ssp = &spi->ssp;
  527. spi_unregister_master(master);
  528. dma_release_channel(ssp->dmach);
  529. clk_disable_unprepare(ssp->clk);
  530. spi_master_put(master);
  531. return 0;
  532. }
/* Platform driver glue; matches by name or via the DT table above. */
static struct platform_driver mxs_spi_driver = {
	.probe	= mxs_spi_probe,
	.remove	= __devexit_p(mxs_spi_remove),
	.driver	= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = mxs_spi_dt_ids,
	},
};

module_platform_driver(mxs_spi_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("MXS SPI master driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-spi");