spi-mxs.c

/*
 * Freescale MXS SPI master driver
 *
 * Copyright 2012 DENX Software Engineering, GmbH.
 * Copyright 2012 Freescale Semiconductor, Inc.
 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 *
 * Rework and transition to new API by:
 * Marek Vasut <marex@denx.de>
 *
 * Based on previous attempt by:
 * Fabio Estevam <fabio.estevam@freescale.com>
 *
 * Based on code from U-Boot bootloader by:
 * Marek Vasut <marex@denx.de>
 *
 * Based on spi-stmp.c, which is:
 * Author: Dmitry Pervushin <dimka@embeddedalley.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME		"mxs-spi"

/* Use a 10 s timeout for very long transfers; it should suffice. */
#define SSP_TIMEOUT		10000

#define SG_MAXLEN		0xff00

/*
 * Flags for txrx functions.  More efficient than using an argument register
 * for each one.
 */
#define TXRX_WRITE		(1<<0)	/* This is a write */
#define TXRX_DEASSERT_CS	(1<<1)	/* De-assert CS at end of txrx */

struct mxs_spi {
        struct mxs_ssp		ssp;
        struct completion	c;
};
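
/*
 * Program the SSP block for one transfer: set the bit clock, lock the chip
 * select, and select SPI mode with 8-bit words and the CPOL/CPHA settings
 * requested by the slave device.
 */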
static int mxs_spi_setup_transfer(struct spi_device *dev,
                                  const struct spi_transfer *t)
{
        struct mxs_spi *spi = spi_master_get_devdata(dev->master);
        struct mxs_ssp *ssp = &spi->ssp;
        const unsigned int hz = min(dev->max_speed_hz, t->speed_hz);

        if (hz == 0) {
                dev_err(&dev->dev, "SPI clock rate of zero not allowed\n");
                return -EINVAL;
        }

        mxs_ssp_set_clk_rate(ssp, hz);

        writel(BM_SSP_CTRL0_LOCK_CS,
               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

        writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
               BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
               ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
               ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
               ssp->base + HW_SSP_CTRL1(ssp));

        writel(0x0, ssp->base + HW_SSP_CMD0);
        writel(0x0, ssp->base + HW_SSP_CMD1);

        return 0;
}
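
/* Fall back to 8 bits per word if the slave did not specify a width. */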
static int mxs_spi_setup(struct spi_device *dev)
{
        if (!dev->bits_per_word)
                dev->bits_per_word = 8;

        return 0;
}

static uint32_t mxs_spi_cs_to_reg(unsigned cs)
{
        uint32_t select = 0;

        /*
         * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
         *
         * The bits BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ
         * in the HW_SSP_CTRL0 register have multiple uses; please refer to
         * the datasheet for further details.  In SPI mode, they are used to
         * toggle the chip-select lines (nCS pins).
         */
        if (cs & 1)
                select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
        if (cs & 2)
                select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;

        return select;
}
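
/*
 * Poll the SSP register at @offset until all bits in @mask are set
 * (or cleared, when @set is false), bounded by the SSP_TIMEOUT deadline.
 */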
static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
{
        const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
        struct mxs_ssp *ssp = &spi->ssp;
        uint32_t reg;

        do {
                reg = readl_relaxed(ssp->base + offset);

                if (!set)
                        reg = ~reg;

                reg &= mask;

                if (reg == mask)
                        return 0;
        } while (time_before(jiffies, timeout));

        return -ETIMEDOUT;
}
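
/* DMA completion callback: wake the waiter in mxs_spi_txrx_dma(). */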
static void mxs_ssp_dma_irq_callback(void *param)
{
        struct mxs_spi *spi = param;

        complete(&spi->c);
}
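
/*
 * SSP error interrupt: nothing is recovered here, the handler only dumps
 * CTRL1 and STATUS for diagnostics.
 */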
static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
        struct mxs_ssp *ssp = dev_id;

        dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
                __func__, __LINE__,
                readl(ssp->base + HW_SSP_CTRL1(ssp)),
                readl(ssp->base + HW_SSP_STATUS(ssp)));

        return IRQ_HANDLED;
}
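
/*
 * DMA transfer path: split the buffer into segments of at most SG_MAXLEN
 * bytes (one page for vmalloc'ed buffers) and queue, per segment, a PIO
 * descriptor that programs CTRL0 and the transfer count, followed by the
 * data descriptor itself.  Only the last descriptor gets the completion
 * callback.
 */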
static int mxs_spi_txrx_dma(struct mxs_spi *spi,
                            unsigned char *buf, int len,
                            unsigned int flags)
{
        struct mxs_ssp *ssp = &spi->ssp;
        struct dma_async_tx_descriptor *desc = NULL;
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
        const int sgs = DIV_ROUND_UP(len, desc_len);
        int sg_count;
        int min, ret;
        uint32_t ctrl0;
        struct page *vm_page;
        void *sg_buf;
        struct {
                uint32_t		pio[4];
                struct scatterlist	sg;
        } *dma_xfer;

        if (!len)
                return -EINVAL;

        dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL);
        if (!dma_xfer)
                return -ENOMEM;

        INIT_COMPLETION(spi->c);

        /* Chip select was already programmed into CTRL0 */
        ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
        ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC |
                   BM_SSP_CTRL0_READ);
        ctrl0 |= BM_SSP_CTRL0_DATA_XFER;

        if (!(flags & TXRX_WRITE))
                ctrl0 |= BM_SSP_CTRL0_READ;

        /* Queue the DMA data transfer. */
        for (sg_count = 0; sg_count < sgs; sg_count++) {
                /* Prepare the transfer descriptor. */
                min = min(len, desc_len);

                /*
                 * De-assert CS on the last segment if the flag is set (i.e.,
                 * no more transfers will follow).
                 */
                if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
                        ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;

                if (ssp->devid == IMX23_SSP) {
                        ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
                        ctrl0 |= min;
                }

                dma_xfer[sg_count].pio[0] = ctrl0;
                dma_xfer[sg_count].pio[3] = min;

                if (vmalloced_buf) {
                        vm_page = vmalloc_to_page(buf);
                        if (!vm_page) {
                                ret = -ENOMEM;
                                goto err_vmalloc;
                        }
                        sg_buf = page_address(vm_page) +
                                 ((size_t)buf & ~PAGE_MASK);
                } else {
                        sg_buf = buf;
                }

                sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
                ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
                        (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

                len -= min;
                buf += min;

                /* Queue the PIO register write transfer. */
                desc = dmaengine_prep_slave_sg(ssp->dmach,
                                (struct scatterlist *)dma_xfer[sg_count].pio,
                                (ssp->devid == IMX23_SSP) ? 1 : 4,
                                DMA_TRANS_NONE,
                                sg_count ? DMA_PREP_INTERRUPT : 0);
                if (!desc) {
                        dev_err(ssp->dev,
                                "Failed to get PIO reg. write descriptor.\n");
                        ret = -EINVAL;
                        goto err_mapped;
                }

                desc = dmaengine_prep_slave_sg(ssp->dmach,
                                &dma_xfer[sg_count].sg, 1,
                                (flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (!desc) {
                        dev_err(ssp->dev,
                                "Failed to get DMA data write descriptor.\n");
                        ret = -EINVAL;
                        goto err_mapped;
                }
        }

        /*
         * The last descriptor must have this callback
         * to finish the DMA transaction.
         */
        desc->callback = mxs_ssp_dma_irq_callback;
        desc->callback_param = spi;

        /* Start the transfer. */
        dmaengine_submit(desc);
        dma_async_issue_pending(ssp->dmach);

        ret = wait_for_completion_timeout(&spi->c,
                                          msecs_to_jiffies(SSP_TIMEOUT));
        if (!ret) {
                dev_err(ssp->dev, "DMA transfer timeout\n");
                ret = -ETIMEDOUT;
                dmaengine_terminate_all(ssp->dmach);
                goto err_vmalloc;
        }

        ret = 0;

err_vmalloc:
        while (--sg_count >= 0) {
err_mapped:
                dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
                        (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        }

        kfree(dma_xfer);

        return ret;
}
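
/*
 * PIO transfer path: move one byte at a time through the SSP DATA register,
 * pulsing RUN for each byte and de-asserting CS (via IGNORE_CRC) on the
 * last byte when requested.
 */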
static int mxs_spi_txrx_pio(struct mxs_spi *spi,
                            unsigned char *buf, int len,
                            unsigned int flags)
{
        struct mxs_ssp *ssp = &spi->ssp;

        writel(BM_SSP_CTRL0_IGNORE_CRC,
               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);

        while (len--) {
                if (len == 0 && (flags & TXRX_DEASSERT_CS))
                        writel(BM_SSP_CTRL0_IGNORE_CRC,
                               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

                if (ssp->devid == IMX23_SSP) {
                        writel(BM_SSP_CTRL0_XFER_COUNT,
                               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
                        writel(1,
                               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
                } else {
                        writel(1, ssp->base + HW_SSP_XFER_SIZE);
                }

                if (flags & TXRX_WRITE)
                        writel(BM_SSP_CTRL0_READ,
                               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
                else
                        writel(BM_SSP_CTRL0_READ,
                               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

                writel(BM_SSP_CTRL0_RUN,
                       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

                if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
                        return -ETIMEDOUT;

                if (flags & TXRX_WRITE)
                        writel(*buf, ssp->base + HW_SSP_DATA(ssp));

                writel(BM_SSP_CTRL0_DATA_XFER,
                       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

                if (!(flags & TXRX_WRITE)) {
                        if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
                                         BM_SSP_STATUS_FIFO_EMPTY, 0))
                                return -ETIMEDOUT;

                        *buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
                }

                if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
                        return -ETIMEDOUT;

                buf++;
        }

        if (len <= 0)
                return 0;

        return -ETIMEDOUT;
}
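
/*
 * Handle one spi_message: program the chip-select bits once, then walk the
 * transfer list, using PIO for short transfers (< 32 bytes) and DMA for
 * larger ones.
 */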
static int mxs_spi_transfer_one(struct spi_master *master,
                                struct spi_message *m)
{
        struct mxs_spi *spi = spi_master_get_devdata(master);
        struct mxs_ssp *ssp = &spi->ssp;
        struct spi_transfer *t, *tmp_t;
        unsigned int flag;
        int status = 0;

        /* Program the CS register bits here; they are used for all transfers. */
        writel(BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ,
               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
        writel(mxs_spi_cs_to_reg(m->spi->chip_select),
               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

        list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
                status = mxs_spi_setup_transfer(m->spi, t);
                if (status)
                        break;

                /* De-assert on last transfer, inverted by the cs_change flag */
                flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
                       TXRX_DEASSERT_CS : 0;

                /*
                 * Small blocks can be transferred via PIO.
                 * Measured by empirical means:
                 *
                 * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
                 *
                 * DMA only: 2.164808 seconds, 473.0KB/s
                 * Combined: 1.676276 seconds, 610.9KB/s
                 */
                if (t->len < 32) {
                        writel(BM_SSP_CTRL1_DMA_ENABLE,
                               ssp->base + HW_SSP_CTRL1(ssp) +
                               STMP_OFFSET_REG_CLR);

                        if (t->tx_buf)
                                status = mxs_spi_txrx_pio(spi,
                                                (void *)t->tx_buf,
                                                t->len, flag | TXRX_WRITE);
                        if (t->rx_buf)
                                status = mxs_spi_txrx_pio(spi,
                                                t->rx_buf, t->len,
                                                flag);
                } else {
                        writel(BM_SSP_CTRL1_DMA_ENABLE,
                               ssp->base + HW_SSP_CTRL1(ssp) +
                               STMP_OFFSET_REG_SET);

                        if (t->tx_buf)
                                status = mxs_spi_txrx_dma(spi,
                                                (void *)t->tx_buf, t->len,
                                                flag | TXRX_WRITE);
                        if (t->rx_buf)
                                status = mxs_spi_txrx_dma(spi,
                                                t->rx_buf, t->len,
                                                flag);
                }

                if (status) {
                        stmp_reset_block(ssp->base);
                        break;
                }

                m->actual_length += t->len;
        }

        m->status = status;
        spi_finalize_current_message(master);

        return status;
}

static const struct of_device_id mxs_spi_dt_ids[] = {
        { .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
        { .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
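
/*
 * Probe: map the SSP register block, acquire the clock, the SSP error
 * interrupt and the "rx-tx" DMA channel, reset the block and register the
 * SPI master.  An optional "clock-frequency" device-tree property overrides
 * the 160 MHz default.
 */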
static int mxs_spi_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id =
                        of_match_device(mxs_spi_dt_ids, &pdev->dev);
        struct device_node *np = pdev->dev.of_node;
        struct spi_master *master;
        struct mxs_spi *spi;
        struct mxs_ssp *ssp;
        struct resource *iores;
        struct clk *clk;
        void __iomem *base;
        int devid, clk_freq;
        int ret = 0, irq_err;
        /*
         * Default clock speed for the SPI core.  160 MHz seems to
         * work reasonably well with most SPI flashes, so use this
         * as a default.  Override with the "clock-frequency" DT prop.
         */
        const int clk_freq_default = 160000000;

        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq_err = platform_get_irq(pdev, 0);
        if (irq_err < 0)
                return -EINVAL;

        base = devm_ioremap_resource(&pdev->dev, iores);
        if (IS_ERR(base))
                return PTR_ERR(base);

        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        devid = (enum mxs_ssp_id) of_id->data;
        ret = of_property_read_u32(np, "clock-frequency",
                                   &clk_freq);
        if (ret)
                clk_freq = clk_freq_default;

        master = spi_alloc_master(&pdev->dev, sizeof(*spi));
        if (!master)
                return -ENOMEM;

        master->transfer_one_message = mxs_spi_transfer_one;
        master->setup = mxs_spi_setup;
        master->bits_per_word_mask = SPI_BPW_MASK(8);
        master->mode_bits = SPI_CPOL | SPI_CPHA;
        master->num_chipselect = 3;
        master->dev.of_node = np;
        master->flags = SPI_MASTER_HALF_DUPLEX;

        spi = spi_master_get_devdata(master);
        ssp = &spi->ssp;
        ssp->dev = &pdev->dev;
        ssp->clk = clk;
        ssp->base = base;
        ssp->devid = devid;

        init_completion(&spi->c);

        ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
                               DRIVER_NAME, ssp);
        if (ret)
                goto out_master_free;

        ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
        if (!ssp->dmach) {
                dev_err(ssp->dev, "Failed to request DMA\n");
                ret = -ENODEV;
                goto out_master_free;
        }

        ret = clk_prepare_enable(ssp->clk);
        if (ret)
                goto out_dma_release;

        clk_set_rate(ssp->clk, clk_freq);

        ret = stmp_reset_block(ssp->base);
        if (ret)
                goto out_disable_clk;

        platform_set_drvdata(pdev, master);

        ret = spi_register_master(master);
        if (ret) {
                dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
                goto out_disable_clk;
        }

        return 0;

out_disable_clk:
        clk_disable_unprepare(ssp->clk);
out_dma_release:
        dma_release_channel(ssp->dmach);
out_master_free:
        spi_master_put(master);
        return ret;
}
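
/* Tear everything down in reverse order of mxs_spi_probe(). */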
static int mxs_spi_remove(struct platform_device *pdev)
{
        struct spi_master *master;
        struct mxs_spi *spi;
        struct mxs_ssp *ssp;

        master = spi_master_get(platform_get_drvdata(pdev));
        spi = spi_master_get_devdata(master);
        ssp = &spi->ssp;

        spi_unregister_master(master);
        clk_disable_unprepare(ssp->clk);
        dma_release_channel(ssp->dmach);
        spi_master_put(master);

        return 0;
}

static struct platform_driver mxs_spi_driver = {
        .probe	= mxs_spi_probe,
        .remove	= mxs_spi_remove,
        .driver	= {
                .name	= DRIVER_NAME,
                .owner	= THIS_MODULE,
                .of_match_table = mxs_spi_dt_ids,
        },
};

module_platform_driver(mxs_spi_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("MXS SPI master driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-spi");