/* spi-ep93xx.c */
  1. /*
  2. * Driver for Cirrus Logic EP93xx SPI controller.
  3. *
  4. * Copyright (C) 2010-2011 Mika Westerberg
  5. *
  6. * Explicit FIFO handling code was inspired by amba-pl022 driver.
  7. *
  8. * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
  9. *
  10. * For more information about the SPI controller see documentation on Cirrus
  11. * Logic web site:
  12. * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License version 2 as
  16. * published by the Free Software Foundation.
  17. */
  18. #include <linux/io.h>
  19. #include <linux/clk.h>
  20. #include <linux/err.h>
  21. #include <linux/delay.h>
  22. #include <linux/device.h>
  23. #include <linux/dmaengine.h>
  24. #include <linux/bitops.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/module.h>
  27. #include <linux/platform_device.h>
  28. #include <linux/sched.h>
  29. #include <linux/scatterlist.h>
  30. #include <linux/spi/spi.h>
  31. #include <linux/platform_data/dma-ep93xx.h>
  32. #include <linux/platform_data/spi-ep93xx.h>
  33. #define SSPCR0 0x0000
  34. #define SSPCR0_MODE_SHIFT 6
  35. #define SSPCR0_SCR_SHIFT 8
  36. #define SSPCR1 0x0004
  37. #define SSPCR1_RIE BIT(0)
  38. #define SSPCR1_TIE BIT(1)
  39. #define SSPCR1_RORIE BIT(2)
  40. #define SSPCR1_LBM BIT(3)
  41. #define SSPCR1_SSE BIT(4)
  42. #define SSPCR1_MS BIT(5)
  43. #define SSPCR1_SOD BIT(6)
  44. #define SSPDR 0x0008
  45. #define SSPSR 0x000c
  46. #define SSPSR_TFE BIT(0)
  47. #define SSPSR_TNF BIT(1)
  48. #define SSPSR_RNE BIT(2)
  49. #define SSPSR_RFF BIT(3)
  50. #define SSPSR_BSY BIT(4)
  51. #define SSPCPSR 0x0010
  52. #define SSPIIR 0x0014
  53. #define SSPIIR_RIS BIT(0)
  54. #define SSPIIR_TIS BIT(1)
  55. #define SSPIIR_RORIS BIT(2)
  56. #define SSPICR SSPIIR
  57. /* timeout in milliseconds */
  58. #define SPI_TIMEOUT 5
  59. /* maximum depth of RX/TX FIFO */
  60. #define SPI_FIFO_SIZE 8
/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register (programmed as the
 *	DMA slave address for both directions)
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @wait: wait here until given transfer is completed
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *	frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel (%NULL when DMA is not in use)
 * @dma_tx: TX DMA channel (%NULL when DMA is not in use)
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *	the client
 */
struct ep93xx_spi {
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	unsigned long			min_rate;
	unsigned long			max_rate;
	struct completion		wait;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};
/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @ops: private chip operations supplied via @spi->controller_data
 *	(chipselect control plus optional setup/cleanup hooks; may be %NULL)
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	struct ep93xx_spi_chip_ops	*ops;
};
  113. /* converts bits per word to CR0.DSS value */
  114. #define bits_per_word_to_dss(bpw) ((bpw) - 1)
/* Write an 8-bit value to controller register @reg. */
static void ep93xx_spi_write_u8(const struct ep93xx_spi *espi,
				u16 reg, u8 value)
{
	writeb(value, espi->regs_base + reg);
}
/* Read an 8-bit value from controller register @reg. */
static u8 ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return readb(spi->regs_base + reg);
}
/* Write a 16-bit value to controller register @reg. */
static void ep93xx_spi_write_u16(const struct ep93xx_spi *espi,
				 u16 reg, u16 value)
{
	writew(value, espi->regs_base + reg);
}
/* Read a 16-bit value from controller register @reg. */
static u16 ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return readw(spi->regs_base + reg);
}
  133. static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
  134. {
  135. u8 regval;
  136. int err;
  137. err = clk_enable(espi->clk);
  138. if (err)
  139. return err;
  140. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  141. regval |= SSPCR1_SSE;
  142. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  143. return 0;
  144. }
  145. static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
  146. {
  147. u8 regval;
  148. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  149. regval &= ~SSPCR1_SSE;
  150. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  151. clk_disable(espi->clk);
  152. }
  153. static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
  154. {
  155. u8 regval;
  156. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  157. regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  158. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  159. }
  160. static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
  161. {
  162. u8 regval;
  163. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  164. regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  165. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  166. }
/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @rate: desired SPI output clock rate
 * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
 * @div_scr: pointer to return the scr divider
 *
 * Returns %0 on success (with @div_cpsr/@div_scr filled in) or %-EINVAL
 * if no divisor pair can bring the output clock down to @rate.
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    unsigned long rate,
				    u8 *div_cpsr, u8 *div_scr)
{
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that max value is between values supported by the
	 * controller. Note that minimum value is already checked in
	 * ep93xx_spi_transfer_one_message().
	 */
	rate = clamp(rate, espi->min_rate, espi->max_rate);

	/*
	 * Calculate divisors so that we can get speed according the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be even number and starts from 2, scr can be any number
	 * between 0 and 255.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			/*
			 * First match wins: smallest cpsr, then smallest scr,
			 * i.e. the fastest clock not exceeding @rate (using
			 * integer/floor division, same as the hardware).
			 */
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				*div_scr = (u8)scr;
				*div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}
  205. static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
  206. {
  207. struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
  208. int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
  209. if (chip->ops && chip->ops->cs_control)
  210. chip->ops->cs_control(spi, value);
  211. }
  212. /**
  213. * ep93xx_spi_setup() - setup an SPI device
  214. * @spi: SPI device to setup
  215. *
  216. * This function sets up SPI device mode, speed etc. Can be called multiple
  217. * times for a single device. Returns %0 in case of success, negative error in
  218. * case of failure. When this function returns success, the device is
  219. * deselected.
  220. */
  221. static int ep93xx_spi_setup(struct spi_device *spi)
  222. {
  223. struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
  224. struct ep93xx_spi_chip *chip;
  225. chip = spi_get_ctldata(spi);
  226. if (!chip) {
  227. dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
  228. spi->modalias);
  229. chip = kzalloc(sizeof(*chip), GFP_KERNEL);
  230. if (!chip)
  231. return -ENOMEM;
  232. chip->spi = spi;
  233. chip->ops = spi->controller_data;
  234. if (chip->ops && chip->ops->setup) {
  235. int ret = chip->ops->setup(spi);
  236. if (ret) {
  237. kfree(chip);
  238. return ret;
  239. }
  240. }
  241. spi_set_ctldata(spi, chip);
  242. }
  243. ep93xx_spi_cs_control(spi, false);
  244. return 0;
  245. }
  246. /**
  247. * ep93xx_spi_cleanup() - cleans up master controller specific state
  248. * @spi: SPI device to cleanup
  249. *
  250. * This function releases master controller specific state for given @spi
  251. * device.
  252. */
  253. static void ep93xx_spi_cleanup(struct spi_device *spi)
  254. {
  255. struct ep93xx_spi_chip *chip;
  256. chip = spi_get_ctldata(spi);
  257. if (chip) {
  258. if (chip->ops && chip->ops->cleanup)
  259. chip->ops->cleanup(spi);
  260. spi_set_ctldata(spi, NULL);
  261. kfree(chip);
  262. }
  263. }
  264. /**
  265. * ep93xx_spi_chip_setup() - configures hardware according to given @chip
  266. * @espi: ep93xx SPI controller struct
  267. * @chip: chip specific settings
  268. * @speed_hz: transfer speed
  269. * @bits_per_word: transfer bits_per_word
  270. */
  271. static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
  272. const struct ep93xx_spi_chip *chip,
  273. u32 speed_hz, u8 bits_per_word)
  274. {
  275. u8 dss = bits_per_word_to_dss(bits_per_word);
  276. u8 div_cpsr = 0;
  277. u8 div_scr = 0;
  278. u16 cr0;
  279. int err;
  280. err = ep93xx_spi_calc_divisors(espi, speed_hz, &div_cpsr, &div_scr);
  281. if (err)
  282. return err;
  283. cr0 = div_scr << SSPCR0_SCR_SHIFT;
  284. cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
  285. cr0 |= dss;
  286. dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
  287. chip->spi->mode, div_cpsr, div_scr, dss);
  288. dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
  289. ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
  290. ep93xx_spi_write_u16(espi, SSPCR0, cr0);
  291. return 0;
  292. }
  293. static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
  294. {
  295. if (t->bits_per_word > 8) {
  296. u16 tx_val = 0;
  297. if (t->tx_buf)
  298. tx_val = ((u16 *)t->tx_buf)[espi->tx];
  299. ep93xx_spi_write_u16(espi, SSPDR, tx_val);
  300. espi->tx += sizeof(tx_val);
  301. } else {
  302. u8 tx_val = 0;
  303. if (t->tx_buf)
  304. tx_val = ((u8 *)t->tx_buf)[espi->tx];
  305. ep93xx_spi_write_u8(espi, SSPDR, tx_val);
  306. espi->tx += sizeof(tx_val);
  307. }
  308. }
  309. static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
  310. {
  311. if (t->bits_per_word > 8) {
  312. u16 rx_val;
  313. rx_val = ep93xx_spi_read_u16(espi, SSPDR);
  314. if (t->rx_buf)
  315. ((u16 *)t->rx_buf)[espi->rx] = rx_val;
  316. espi->rx += sizeof(rx_val);
  317. } else {
  318. u8 rx_val;
  319. rx_val = ep93xx_spi_read_u8(espi, SSPDR);
  320. if (t->rx_buf)
  321. ((u8 *)t->rx_buf)[espi->rx] = rx_val;
  322. espi->rx += sizeof(rx_val);
  323. }
  324. }
/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should be
 * full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/*
	 * write as long as TX FIFO has room; fifo_level is our own frame
	 * count, used instead of SSPSR_TNF to avoid RX FIFO overruns
	 */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	/* the transfer is complete once every byte has been received back */
	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}
  354. static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
  355. {
  356. /*
  357. * Now everything is set up for the current transfer. We prime the TX
  358. * FIFO, enable interrupts, and wait for the transfer to complete.
  359. */
  360. if (ep93xx_spi_read_write(espi)) {
  361. ep93xx_spi_enable_interrupts(espi);
  362. wait_for_completion(&espi->wait);
  363. }
  364. }
/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	/* Bus width follows the frame size: 16-bit frames need 2-byte accesses. */
	if (t->bits_per_word > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	/* Both directions target the SSPDR data register. */
	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */
	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	/* Fill the sg list; a missing buffer is substituted by the zero page. */
	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	/* All bytes must have been distributed over the sg entries. */
	if (WARN_ON(len)) {
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}
  452. /**
  453. * ep93xx_spi_dma_finish() - finishes with a DMA transfer
  454. * @espi: ep93xx SPI controller struct
  455. * @dir: DMA transfer direction
  456. *
  457. * Function finishes with the DMA transfer. After this, the DMA buffer is
  458. * unmapped.
  459. */
  460. static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
  461. enum dma_transfer_direction dir)
  462. {
  463. struct dma_chan *chan;
  464. struct sg_table *sgt;
  465. if (dir == DMA_DEV_TO_MEM) {
  466. chan = espi->dma_rx;
  467. sgt = &espi->rx_sgt;
  468. } else {
  469. chan = espi->dma_tx;
  470. sgt = &espi->tx_sgt;
  471. }
  472. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  473. }
/* RX DMA completion callback: wakes ep93xx_spi_dma_transfer(). */
static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}
  478. static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
  479. {
  480. struct spi_message *msg = espi->current_msg;
  481. struct dma_async_tx_descriptor *rxd, *txd;
  482. rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
  483. if (IS_ERR(rxd)) {
  484. dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
  485. msg->status = PTR_ERR(rxd);
  486. return;
  487. }
  488. txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
  489. if (IS_ERR(txd)) {
  490. ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
  491. dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
  492. msg->status = PTR_ERR(txd);
  493. return;
  494. }
  495. /* We are ready when RX is done */
  496. rxd->callback = ep93xx_spi_dma_callback;
  497. rxd->callback_param = &espi->wait;
  498. /* Now submit both descriptors and wait while they finish */
  499. dmaengine_submit(rxd);
  500. dmaengine_submit(txd);
  501. dma_async_issue_pending(espi->dma_rx);
  502. dma_async_issue_pending(espi->dma_tx);
  503. wait_for_completion(&espi->wait);
  504. ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
  505. ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
  506. }
/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
	int err;

	/* Stash the active transfer where the IRQ/DMA paths can find it. */
	msg->state = t;

	/* Reprogram speed/mode/word size; each transfer may differ. */
	err = ep93xx_spi_chip_setup(espi, chip, t->speed_hz, t->bits_per_word);
	if (err) {
		dev_err(&espi->pdev->dev,
			"failed to setup chip for transfer\n");
		msg->status = err;
		return;
	}

	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point of setting up DMA for the transfers which will
	 * fit into the FIFO and can be transferred with a single interrupt.
	 * So in these cases we will be using PIO and don't bother for DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler to handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}
}
/*
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case of
 * failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO. Bounded by
	 * SPI_TIMEOUT in case the RNE flag never clears.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Assert the chipselect.
	 */
	ep93xx_spi_cs_control(msg->spi, true);

	/* Process transfers in order, stopping at the first failure. */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}
/*
 * spi_master->transfer_one_message() hook: validates the requested rates,
 * processes the message synchronously, and finalizes it. Always returns 0
 * after finalizing (errors are reported through @msg->status), or -EINVAL
 * up-front when a transfer asks for a rate below what the hardware can do.
 */
static int ep93xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_transfer *t;

	/* first validate each transfer */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (t->speed_hz < espi->min_rate)
			return -EINVAL;
	}

	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	/* current_msg is consulted by the IRQ handler and DMA paths */
	espi->current_msg = msg;
	ep93xx_spi_process_message(espi, msg);
	espi->current_msg = NULL;

	spi_finalize_current_message(master);

	return 0;
}
/*
 * SPI interrupt handler. Services RX/TX FIFO interrupts by shuffling more
 * data through ep93xx_spi_read_write(), and aborts the current message on
 * receive overrun. Completes @espi->wait when the transfer is finished.
 */
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TIS (TX). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}
  687. static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
  688. {
  689. if (ep93xx_dma_chan_is_m2p(chan))
  690. return false;
  691. chan->private = filter_param;
  692. return true;
  693. }
  694. static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
  695. {
  696. dma_cap_mask_t mask;
  697. int ret;
  698. espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
  699. if (!espi->zeropage)
  700. return -ENOMEM;
  701. dma_cap_zero(mask);
  702. dma_cap_set(DMA_SLAVE, mask);
  703. espi->dma_rx_data.port = EP93XX_DMA_SSP;
  704. espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
  705. espi->dma_rx_data.name = "ep93xx-spi-rx";
  706. espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  707. &espi->dma_rx_data);
  708. if (!espi->dma_rx) {
  709. ret = -ENODEV;
  710. goto fail_free_page;
  711. }
  712. espi->dma_tx_data.port = EP93XX_DMA_SSP;
  713. espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
  714. espi->dma_tx_data.name = "ep93xx-spi-tx";
  715. espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  716. &espi->dma_tx_data);
  717. if (!espi->dma_tx) {
  718. ret = -ENODEV;
  719. goto fail_release_rx;
  720. }
  721. return 0;
  722. fail_release_rx:
  723. dma_release_channel(espi->dma_rx);
  724. espi->dma_rx = NULL;
  725. fail_free_page:
  726. free_page((unsigned long)espi->zeropage);
  727. return ret;
  728. }
/*
 * Release everything ep93xx_spi_setup_dma() acquired: DMA channels, the
 * (lazily grown) sg tables, and the zero page. Safe to call when DMA setup
 * failed or was never attempted - each resource is checked first.
 */
static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}
/*
 * Probe: allocate and register the SPI master, map registers, request the
 * IRQ and (optionally) DMA channels. Clock, register mapping and IRQ use
 * devm_* and need no explicit unwinding; DMA and the master itself are
 * released on the error paths.
 */
static int ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int irq;
	int error;

	/*
	 * NOTE(review): info is dereferenced below without a NULL check -
	 * the board code is presumed to always supply platform data; verify.
	 */
	info = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resources\n");
		return -EBUSY;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master)
		return -ENOMEM;

	master->setup = ep93xx_spi_setup;
	master->transfer_one_message = ep93xx_spi_transfer_one_message;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	/* the controller supports 4..16 bit frames */
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	init_completion(&espi->wait);

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
	espi->max_rate = clk_get_rate(espi->clk) / 2;
	espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
	espi->pdev = pdev;

	espi->sspdr_phys = res->start + SSPDR;

	espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(espi->regs_base)) {
		error = PTR_ERR(espi->regs_base);
		goto fail_release_master;
	}

	error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
				 0, "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_release_master;
	}

	/* DMA is optional: fall back to PIO when channels are unavailable. */
	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = spi_register_master(master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_dma;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, irq);

	return 0;

fail_free_dma:
	ep93xx_spi_release_dma(espi);
fail_release_master:
	spi_master_put(master);

	return error;
}
/* Remove: release DMA resources and unregister (and free) the master. */
static int ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	/*
	 * NOTE(review): DMA channels are released before the master is
	 * unregistered; this presumes the SPI core has already quiesced the
	 * message queue by the time remove() runs - confirm.
	 */
	ep93xx_spi_release_dma(espi);

	spi_unregister_master(master);
	return 0;
}
/* Platform driver glue; matches the "ep93xx-spi" device created by SoC code. */
static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);
  834. MODULE_DESCRIPTION("EP93xx SPI Controller driver");
  835. MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
  836. MODULE_LICENSE("GPL");
  837. MODULE_ALIAS("platform:ep93xx-spi");