/* spi-ep93xx.c */
  1. /*
  2. * Driver for Cirrus Logic EP93xx SPI controller.
  3. *
  4. * Copyright (C) 2010-2011 Mika Westerberg
  5. *
  6. * Explicit FIFO handling code was inspired by amba-pl022 driver.
  7. *
  8. * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
  9. *
  10. * For more information about the SPI controller see documentation on Cirrus
  11. * Logic web site:
  12. * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License version 2 as
  16. * published by the Free Software Foundation.
  17. */
  18. #include <linux/io.h>
  19. #include <linux/clk.h>
  20. #include <linux/err.h>
  21. #include <linux/delay.h>
  22. #include <linux/device.h>
  23. #include <linux/dmaengine.h>
  24. #include <linux/bitops.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/module.h>
  27. #include <linux/platform_device.h>
  28. #include <linux/workqueue.h>
  29. #include <linux/sched.h>
  30. #include <linux/scatterlist.h>
  31. #include <linux/spi/spi.h>
  32. #include <linux/platform_data/dma-ep93xx.h>
  33. #include <linux/platform_data/spi-ep93xx.h>
  34. #define SSPCR0 0x0000
  35. #define SSPCR0_MODE_SHIFT 6
  36. #define SSPCR0_SCR_SHIFT 8
  37. #define SSPCR1 0x0004
  38. #define SSPCR1_RIE BIT(0)
  39. #define SSPCR1_TIE BIT(1)
  40. #define SSPCR1_RORIE BIT(2)
  41. #define SSPCR1_LBM BIT(3)
  42. #define SSPCR1_SSE BIT(4)
  43. #define SSPCR1_MS BIT(5)
  44. #define SSPCR1_SOD BIT(6)
  45. #define SSPDR 0x0008
  46. #define SSPSR 0x000c
  47. #define SSPSR_TFE BIT(0)
  48. #define SSPSR_TNF BIT(1)
  49. #define SSPSR_RNE BIT(2)
  50. #define SSPSR_RFF BIT(3)
  51. #define SSPSR_BSY BIT(4)
  52. #define SSPCPSR 0x0010
  53. #define SSPIIR 0x0014
  54. #define SSPIIR_RIS BIT(0)
  55. #define SSPIIR_TIS BIT(1)
  56. #define SSPIIR_RORIS BIT(2)
  57. #define SSPICR SSPIIR
  58. /* timeout in milliseconds */
  59. #define SPI_TIMEOUT 5
  60. /* maximum depth of RX/TX FIFO */
  61. #define SPI_FIFO_SIZE 8
  62. /**
  63. * struct ep93xx_spi - EP93xx SPI controller structure
  64. * @lock: spinlock that protects concurrent accesses to fields @running,
  65. * @current_msg and @msg_queue
  66. * @pdev: pointer to platform device
  67. * @clk: clock for the controller
  68. * @regs_base: pointer to ioremap()'d registers
  69. * @sspdr_phys: physical address of the SSPDR register
  70. * @min_rate: minimum clock rate (in Hz) supported by the controller
  71. * @max_rate: maximum clock rate (in Hz) supported by the controller
  72. * @running: is the queue running
  73. * @wq: workqueue used by the driver
  74. * @msg_work: work that is queued for the driver
  75. * @wait: wait here until given transfer is completed
  76. * @msg_queue: queue for the messages
  77. * @current_msg: message that is currently processed (or %NULL if none)
  78. * @tx: current byte in transfer to transmit
  79. * @rx: current byte in transfer to receive
  80. * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
  81. * frame decreases this level and sending one frame increases it.
  82. * @dma_rx: RX DMA channel
  83. * @dma_tx: TX DMA channel
  84. * @dma_rx_data: RX parameters passed to the DMA engine
  85. * @dma_tx_data: TX parameters passed to the DMA engine
  86. * @rx_sgt: sg table for RX transfers
  87. * @tx_sgt: sg table for TX transfers
  88. * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
  89. * the client
  90. *
  91. * This structure holds EP93xx SPI controller specific information. When
  92. * @running is %true, driver accepts transfer requests from protocol drivers.
  93. * @current_msg is used to hold pointer to the message that is currently
  94. * processed. If @current_msg is %NULL, it means that no processing is going
  95. * on.
  96. *
  97. * Most of the fields are only written once and they can be accessed without
  98. * taking the @lock. Fields that are accessed concurrently are: @current_msg,
  99. * @running, and @msg_queue.
  100. */
/* Per-field documentation is in the kernel-doc comment above. */
struct ep93xx_spi {
	spinlock_t lock;		/* protects @running, @current_msg, @msg_queue */
	const struct platform_device *pdev;
	struct clk *clk;
	void __iomem *regs_base;	/* ioremap()'d SSP registers */
	unsigned long sspdr_phys;	/* physical SSPDR address, programmed into the DMA engine */
	unsigned long min_rate;
	unsigned long max_rate;
	bool running;			/* queue accepts new messages while true */
	struct workqueue_struct *wq;
	struct work_struct msg_work;
	struct completion wait;		/* signalled by IRQ/DMA when a transfer finishes */
	struct list_head msg_queue;
	struct spi_message *current_msg;	/* message being processed, or NULL */
	size_t tx;			/* byte offset into current transfer's TX buffer */
	size_t rx;			/* byte offset into current transfer's RX buffer */
	size_t fifo_level;		/* frames currently queued in the TX FIFO */
	struct dma_chan *dma_rx;
	struct dma_chan *dma_tx;
	struct ep93xx_dma_data dma_rx_data;
	struct ep93xx_dma_data dma_tx_data;
	struct sg_table rx_sgt;		/* reused between DMA transfers */
	struct sg_table tx_sgt;		/* reused between DMA transfers */
	void *zeropage;			/* dummy page standing in for absent RX/TX buffers */
};
  126. /**
  127. * struct ep93xx_spi_chip - SPI device hardware settings
  128. * @spi: back pointer to the SPI device
  129. * @rate: max rate in hz this chip supports
  130. * @div_cpsr: cpsr (pre-scaler) divider
  131. * @div_scr: scr divider
  132. * @dss: bits per word (4 - 16 bits)
  133. * @ops: private chip operations
  134. *
  135. * This structure is used to store hardware register specific settings for each
  136. * SPI device. Settings are written to hardware by function
  137. * ep93xx_spi_chip_setup().
  138. */
/* Per-device settings; see the kernel-doc comment above. */
struct ep93xx_spi_chip {
	const struct spi_device *spi;	/* back pointer to the SPI device */
	unsigned long rate;		/* rate (Hz) the divisors below were computed for */
	u8 div_cpsr;			/* SSPCPSR pre-scaler: even value in [2, 254] */
	u8 div_scr;			/* SSPCR0 SCR divider: [0, 255] */
	u8 dss;				/* SSPCR0 DSS field: bits_per_word - 1 */
	struct ep93xx_spi_chip_ops *ops;	/* board-specific setup/cleanup/cs_control hooks */
};
  147. /* converts bits per word to CR0.DSS value */
  148. #define bits_per_word_to_dss(bpw) ((bpw) - 1)
  149. static inline void
  150. ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
  151. {
  152. __raw_writeb(value, espi->regs_base + reg);
  153. }
  154. static inline u8
  155. ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
  156. {
  157. return __raw_readb(spi->regs_base + reg);
  158. }
  159. static inline void
  160. ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
  161. {
  162. __raw_writew(value, espi->regs_base + reg);
  163. }
  164. static inline u16
  165. ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
  166. {
  167. return __raw_readw(spi->regs_base + reg);
  168. }
  169. static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
  170. {
  171. u8 regval;
  172. int err;
  173. err = clk_enable(espi->clk);
  174. if (err)
  175. return err;
  176. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  177. regval |= SSPCR1_SSE;
  178. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  179. return 0;
  180. }
  181. static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
  182. {
  183. u8 regval;
  184. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  185. regval &= ~SSPCR1_SSE;
  186. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  187. clk_disable(espi->clk);
  188. }
  189. static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
  190. {
  191. u8 regval;
  192. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  193. regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  194. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  195. }
  196. static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
  197. {
  198. u8 regval;
  199. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  200. regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  201. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  202. }
  203. /**
  204. * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
  205. * @espi: ep93xx SPI controller struct
  206. * @chip: divisors are calculated for this chip
  207. * @rate: desired SPI output clock rate
  208. *
  209. * Function calculates cpsr (clock pre-scaler) and scr divisors based on
  210. * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
  211. * for some reason, divisors cannot be calculated nothing is stored and
  212. * %-EINVAL is returned.
  213. */
  214. static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
  215. struct ep93xx_spi_chip *chip,
  216. unsigned long rate)
  217. {
  218. unsigned long spi_clk_rate = clk_get_rate(espi->clk);
  219. int cpsr, scr;
  220. /*
  221. * Make sure that max value is between values supported by the
  222. * controller. Note that minimum value is already checked in
  223. * ep93xx_spi_transfer().
  224. */
  225. rate = clamp(rate, espi->min_rate, espi->max_rate);
  226. /*
  227. * Calculate divisors so that we can get speed according the
  228. * following formula:
  229. * rate = spi_clock_rate / (cpsr * (1 + scr))
  230. *
  231. * cpsr must be even number and starts from 2, scr can be any number
  232. * between 0 and 255.
  233. */
  234. for (cpsr = 2; cpsr <= 254; cpsr += 2) {
  235. for (scr = 0; scr <= 255; scr++) {
  236. if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
  237. chip->div_scr = (u8)scr;
  238. chip->div_cpsr = (u8)cpsr;
  239. return 0;
  240. }
  241. }
  242. }
  243. return -EINVAL;
  244. }
  245. static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
  246. {
  247. struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
  248. int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
  249. if (chip->ops && chip->ops->cs_control)
  250. chip->ops->cs_control(spi, value);
  251. }
  252. /**
  253. * ep93xx_spi_setup() - setup an SPI device
  254. * @spi: SPI device to setup
  255. *
  256. * This function sets up SPI device mode, speed etc. Can be called multiple
  257. * times for a single device. Returns %0 in case of success, negative error in
  258. * case of failure. When this function returns success, the device is
  259. * deselected.
  260. */
  261. static int ep93xx_spi_setup(struct spi_device *spi)
  262. {
  263. struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
  264. struct ep93xx_spi_chip *chip;
  265. if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
  266. dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
  267. spi->bits_per_word);
  268. return -EINVAL;
  269. }
  270. chip = spi_get_ctldata(spi);
  271. if (!chip) {
  272. dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
  273. spi->modalias);
  274. chip = kzalloc(sizeof(*chip), GFP_KERNEL);
  275. if (!chip)
  276. return -ENOMEM;
  277. chip->spi = spi;
  278. chip->ops = spi->controller_data;
  279. if (chip->ops && chip->ops->setup) {
  280. int ret = chip->ops->setup(spi);
  281. if (ret) {
  282. kfree(chip);
  283. return ret;
  284. }
  285. }
  286. spi_set_ctldata(spi, chip);
  287. }
  288. if (spi->max_speed_hz != chip->rate) {
  289. int err;
  290. err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
  291. if (err != 0) {
  292. spi_set_ctldata(spi, NULL);
  293. kfree(chip);
  294. return err;
  295. }
  296. chip->rate = spi->max_speed_hz;
  297. }
  298. chip->dss = bits_per_word_to_dss(spi->bits_per_word);
  299. ep93xx_spi_cs_control(spi, false);
  300. return 0;
  301. }
  302. /**
  303. * ep93xx_spi_transfer() - queue message to be transferred
  304. * @spi: target SPI device
  305. * @msg: message to be transferred
  306. *
  307. * This function is called by SPI device drivers when they are going to transfer
  308. * a new message. It simply puts the message in the queue and schedules
  309. * workqueue to perform the actual transfer later on.
  310. *
  311. * Returns %0 on success and negative error in case of failure.
  312. */
  313. static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
  314. {
  315. struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
  316. struct spi_transfer *t;
  317. unsigned long flags;
  318. if (!msg || !msg->complete)
  319. return -EINVAL;
  320. /* first validate each transfer */
  321. list_for_each_entry(t, &msg->transfers, transfer_list) {
  322. if (t->bits_per_word) {
  323. if (t->bits_per_word < 4 || t->bits_per_word > 16)
  324. return -EINVAL;
  325. }
  326. if (t->speed_hz && t->speed_hz < espi->min_rate)
  327. return -EINVAL;
  328. }
  329. /*
  330. * Now that we own the message, let's initialize it so that it is
  331. * suitable for us. We use @msg->status to signal whether there was
  332. * error in transfer and @msg->state is used to hold pointer to the
  333. * current transfer (or %NULL if no active current transfer).
  334. */
  335. msg->state = NULL;
  336. msg->status = 0;
  337. msg->actual_length = 0;
  338. spin_lock_irqsave(&espi->lock, flags);
  339. if (!espi->running) {
  340. spin_unlock_irqrestore(&espi->lock, flags);
  341. return -ESHUTDOWN;
  342. }
  343. list_add_tail(&msg->queue, &espi->msg_queue);
  344. queue_work(espi->wq, &espi->msg_work);
  345. spin_unlock_irqrestore(&espi->lock, flags);
  346. return 0;
  347. }
  348. /**
  349. * ep93xx_spi_cleanup() - cleans up master controller specific state
  350. * @spi: SPI device to cleanup
  351. *
  352. * This function releases master controller specific state for given @spi
  353. * device.
  354. */
  355. static void ep93xx_spi_cleanup(struct spi_device *spi)
  356. {
  357. struct ep93xx_spi_chip *chip;
  358. chip = spi_get_ctldata(spi);
  359. if (chip) {
  360. if (chip->ops && chip->ops->cleanup)
  361. chip->ops->cleanup(spi);
  362. spi_set_ctldata(spi, NULL);
  363. kfree(chip);
  364. }
  365. }
  366. /**
  367. * ep93xx_spi_chip_setup() - configures hardware according to given @chip
  368. * @espi: ep93xx SPI controller struct
  369. * @chip: chip specific settings
  370. *
  371. * This function sets up the actual hardware registers with settings given in
  372. * @chip. Note that no validation is done so make sure that callers validate
  373. * settings before calling this.
  374. */
  375. static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
  376. const struct ep93xx_spi_chip *chip)
  377. {
  378. u16 cr0;
  379. cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
  380. cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
  381. cr0 |= chip->dss;
  382. dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
  383. chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
  384. dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
  385. ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
  386. ep93xx_spi_write_u16(espi, SSPCR0, cr0);
  387. }
  388. static inline int bits_per_word(const struct ep93xx_spi *espi)
  389. {
  390. struct spi_message *msg = espi->current_msg;
  391. struct spi_transfer *t = msg->state;
  392. return t->bits_per_word;
  393. }
  394. static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
  395. {
  396. if (bits_per_word(espi) > 8) {
  397. u16 tx_val = 0;
  398. if (t->tx_buf)
  399. tx_val = ((u16 *)t->tx_buf)[espi->tx];
  400. ep93xx_spi_write_u16(espi, SSPDR, tx_val);
  401. espi->tx += sizeof(tx_val);
  402. } else {
  403. u8 tx_val = 0;
  404. if (t->tx_buf)
  405. tx_val = ((u8 *)t->tx_buf)[espi->tx];
  406. ep93xx_spi_write_u8(espi, SSPDR, tx_val);
  407. espi->tx += sizeof(tx_val);
  408. }
  409. }
  410. static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
  411. {
  412. if (bits_per_word(espi) > 8) {
  413. u16 rx_val;
  414. rx_val = ep93xx_spi_read_u16(espi, SSPDR);
  415. if (t->rx_buf)
  416. ((u16 *)t->rx_buf)[espi->rx] = rx_val;
  417. espi->rx += sizeof(rx_val);
  418. } else {
  419. u8 rx_val;
  420. rx_val = ep93xx_spi_read_u8(espi, SSPDR);
  421. if (t->rx_buf)
  422. ((u8 *)t->rx_buf)[espi->rx] = rx_val;
  423. espi->rx += sizeof(rx_val);
  424. }
  425. }
  426. /**
  427. * ep93xx_spi_read_write() - perform next RX/TX transfer
  428. * @espi: ep93xx SPI controller struct
  429. *
  430. * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
  431. * called several times, the whole transfer will be completed. Returns
  432. * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
  433. *
  434. * When this function is finished, RX FIFO should be empty and TX FIFO should be
  435. * full.
  436. */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/*
	 * Read as long as RX FIFO has frames in it. Draining RX before
	 * refilling TX keeps @fifo_level an accurate count of frames in
	 * flight, which is what prevents RX overruns (see the comment in
	 * ep93xx_spi_process_message()).
	 */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room and transfer data remains */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	/* the transfer is complete once every byte has been received back */
	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}
  455. static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
  456. {
  457. /*
  458. * Now everything is set up for the current transfer. We prime the TX
  459. * FIFO, enable interrupts, and wait for the transfer to complete.
  460. */
  461. if (ep93xx_spi_read_write(espi)) {
  462. ep93xx_spi_enable_interrupts(espi);
  463. wait_for_completion(&espi->wait);
  464. }
  465. }
  466. /**
  467. * ep93xx_spi_dma_prepare() - prepares a DMA transfer
  468. * @espi: ep93xx SPI controller struct
  469. * @dir: DMA transfer direction
  470. *
  471. * Function configures the DMA, maps the buffer and prepares the DMA
  472. * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
  473. * in case of failure.
  474. */
  475. static struct dma_async_tx_descriptor *
  476. ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
  477. {
  478. struct spi_transfer *t = espi->current_msg->state;
  479. struct dma_async_tx_descriptor *txd;
  480. enum dma_slave_buswidth buswidth;
  481. struct dma_slave_config conf;
  482. struct scatterlist *sg;
  483. struct sg_table *sgt;
  484. struct dma_chan *chan;
  485. const void *buf, *pbuf;
  486. size_t len = t->len;
  487. int i, ret, nents;
  488. if (bits_per_word(espi) > 8)
  489. buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
  490. else
  491. buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
  492. memset(&conf, 0, sizeof(conf));
  493. conf.direction = dir;
  494. if (dir == DMA_DEV_TO_MEM) {
  495. chan = espi->dma_rx;
  496. buf = t->rx_buf;
  497. sgt = &espi->rx_sgt;
  498. conf.src_addr = espi->sspdr_phys;
  499. conf.src_addr_width = buswidth;
  500. } else {
  501. chan = espi->dma_tx;
  502. buf = t->tx_buf;
  503. sgt = &espi->tx_sgt;
  504. conf.dst_addr = espi->sspdr_phys;
  505. conf.dst_addr_width = buswidth;
  506. }
  507. ret = dmaengine_slave_config(chan, &conf);
  508. if (ret)
  509. return ERR_PTR(ret);
  510. /*
  511. * We need to split the transfer into PAGE_SIZE'd chunks. This is
  512. * because we are using @espi->zeropage to provide a zero RX buffer
  513. * for the TX transfers and we have only allocated one page for that.
  514. *
  515. * For performance reasons we allocate a new sg_table only when
  516. * needed. Otherwise we will re-use the current one. Eventually the
  517. * last sg_table is released in ep93xx_spi_release_dma().
  518. */
  519. nents = DIV_ROUND_UP(len, PAGE_SIZE);
  520. if (nents != sgt->nents) {
  521. sg_free_table(sgt);
  522. ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
  523. if (ret)
  524. return ERR_PTR(ret);
  525. }
  526. pbuf = buf;
  527. for_each_sg(sgt->sgl, sg, sgt->nents, i) {
  528. size_t bytes = min_t(size_t, len, PAGE_SIZE);
  529. if (buf) {
  530. sg_set_page(sg, virt_to_page(pbuf), bytes,
  531. offset_in_page(pbuf));
  532. } else {
  533. sg_set_page(sg, virt_to_page(espi->zeropage),
  534. bytes, 0);
  535. }
  536. pbuf += bytes;
  537. len -= bytes;
  538. }
  539. if (WARN_ON(len)) {
  540. dev_warn(&espi->pdev->dev, "len = %d expected 0!", len);
  541. return ERR_PTR(-EINVAL);
  542. }
  543. nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  544. if (!nents)
  545. return ERR_PTR(-ENOMEM);
  546. txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
  547. if (!txd) {
  548. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  549. return ERR_PTR(-ENOMEM);
  550. }
  551. return txd;
  552. }
  553. /**
  554. * ep93xx_spi_dma_finish() - finishes with a DMA transfer
  555. * @espi: ep93xx SPI controller struct
  556. * @dir: DMA transfer direction
  557. *
  558. * Function finishes with the DMA transfer. After this, the DMA buffer is
  559. * unmapped.
  560. */
  561. static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
  562. enum dma_transfer_direction dir)
  563. {
  564. struct dma_chan *chan;
  565. struct sg_table *sgt;
  566. if (dir == DMA_DEV_TO_MEM) {
  567. chan = espi->dma_rx;
  568. sgt = &espi->rx_sgt;
  569. } else {
  570. chan = espi->dma_tx;
  571. sgt = &espi->tx_sgt;
  572. }
  573. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  574. }
  575. static void ep93xx_spi_dma_callback(void *callback_param)
  576. {
  577. complete(callback_param);
  578. }
  579. static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
  580. {
  581. struct spi_message *msg = espi->current_msg;
  582. struct dma_async_tx_descriptor *rxd, *txd;
  583. rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
  584. if (IS_ERR(rxd)) {
  585. dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
  586. msg->status = PTR_ERR(rxd);
  587. return;
  588. }
  589. txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
  590. if (IS_ERR(txd)) {
  591. ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
  592. dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
  593. msg->status = PTR_ERR(txd);
  594. return;
  595. }
  596. /* We are ready when RX is done */
  597. rxd->callback = ep93xx_spi_dma_callback;
  598. rxd->callback_param = &espi->wait;
  599. /* Now submit both descriptors and wait while they finish */
  600. dmaengine_submit(rxd);
  601. dmaengine_submit(txd);
  602. dma_async_issue_pending(espi->dma_rx);
  603. dma_async_issue_pending(espi->dma_tx);
  604. wait_for_completion(&espi->wait);
  605. ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
  606. ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
  607. }
  608. /**
  609. * ep93xx_spi_process_transfer() - processes one SPI transfer
  610. * @espi: ep93xx SPI controller struct
  611. * @msg: current message
  612. * @t: transfer to process
  613. *
  614. * This function processes one SPI transfer given in @t. Function waits until
  615. * transfer is complete (may sleep) and updates @msg->status based on whether
  616. * transfer was successfully processed or not.
  617. */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	/* publish @t as the active transfer; the IRQ and DMA paths read it
	 * back through msg->state (see bits_per_word()). */
	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore original later when
	 * the transfer is finished.
	 */
	if (t->speed_hz || t->bits_per_word) {
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	/* reset the per-transfer byte counters used by the FIFO/DMA code */
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point of setting up DMA for the transfers which will
	 * fit into the FIFO and can be transferred with a single interrupt.
	 * So in these cases we will be using PIO and don't bother for DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler to handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}

	/* restore the per-device settings clobbered by the temporary ones */
	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}
  690. /*
  691. * ep93xx_spi_process_message() - process one SPI message
  692. * @espi: ep93xx SPI controller struct
  693. * @msg: message to process
  694. *
  695. * This function processes a single SPI message. We go through all transfers in
  696. * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
  697. * asserted during the whole message (unless per transfer cs_change is set).
  698. *
  699. * @msg->status contains %0 in case of success or negative error code in case of
  700. * failure.
  701. */
  702. static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
  703. struct spi_message *msg)
  704. {
  705. unsigned long timeout;
  706. struct spi_transfer *t;
  707. int err;
  708. /*
  709. * Enable the SPI controller and its clock.
  710. */
  711. err = ep93xx_spi_enable(espi);
  712. if (err) {
  713. dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
  714. msg->status = err;
  715. return;
  716. }
  717. /*
  718. * Just to be sure: flush any data from RX FIFO.
  719. */
  720. timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
  721. while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
  722. if (time_after(jiffies, timeout)) {
  723. dev_warn(&espi->pdev->dev,
  724. "timeout while flushing RX FIFO\n");
  725. msg->status = -ETIMEDOUT;
  726. return;
  727. }
  728. ep93xx_spi_read_u16(espi, SSPDR);
  729. }
  730. /*
  731. * We explicitly handle FIFO level. This way we don't have to check TX
  732. * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
  733. */
  734. espi->fifo_level = 0;
  735. /*
  736. * Update SPI controller registers according to spi device and assert
  737. * the chipselect.
  738. */
  739. ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
  740. ep93xx_spi_cs_control(msg->spi, true);
  741. list_for_each_entry(t, &msg->transfers, transfer_list) {
  742. ep93xx_spi_process_transfer(espi, msg, t);
  743. if (msg->status)
  744. break;
  745. }
  746. /*
  747. * Now the whole message is transferred (or failed for some reason). We
  748. * deselect the device and disable the SPI controller.
  749. */
  750. ep93xx_spi_cs_control(msg->spi, false);
  751. ep93xx_spi_disable(espi);
  752. }
  753. #define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))
  754. /**
  755. * ep93xx_spi_work() - EP93xx SPI workqueue worker function
  756. * @work: work struct
  757. *
  758. * Workqueue worker function. This function is called when there are new
  759. * SPI messages to be processed. Message is taken out from the queue and then
  760. * passed to ep93xx_spi_process_message().
  761. *
  762. * After message is transferred, protocol driver is notified by calling
  763. * @msg->complete(). In case of error, @msg->status is set to negative error
  764. * number, otherwise it contains zero (and @msg->actual_length is updated).
  765. */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	spin_lock_irq(&espi->lock);
	/* nothing to do if shut down, already busy, or the queue is empty */
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	/* claim the head of the queue as the in-flight message */
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	/* may sleep; runs the whole message via PIO or DMA */
	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	/* (called without the lock held — the callback may submit new
	 * messages, which would re-take @espi->lock via transfer()) */
	msg->complete(msg->context);
}
/*
 * Interrupt handler for the SSP block. Reads the interrupt status register
 * and either aborts the current message (on receive overrun), continues the
 * FIFO read/write, or signals completion of the current transfer.
 */
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		/*
		 * NOTE(review): assumes a message is always in flight when
		 * this interrupt fires (i.e. current_msg is non-NULL) --
		 * presumably interrupts are only enabled during a transfer;
		 * confirm against the enable/disable call sites.
		 */
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}
  830. static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
  831. {
  832. if (ep93xx_dma_chan_is_m2p(chan))
  833. return false;
  834. chan->private = filter_param;
  835. return true;
  836. }
  837. static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
  838. {
  839. dma_cap_mask_t mask;
  840. int ret;
  841. espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
  842. if (!espi->zeropage)
  843. return -ENOMEM;
  844. dma_cap_zero(mask);
  845. dma_cap_set(DMA_SLAVE, mask);
  846. espi->dma_rx_data.port = EP93XX_DMA_SSP;
  847. espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
  848. espi->dma_rx_data.name = "ep93xx-spi-rx";
  849. espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  850. &espi->dma_rx_data);
  851. if (!espi->dma_rx) {
  852. ret = -ENODEV;
  853. goto fail_free_page;
  854. }
  855. espi->dma_tx_data.port = EP93XX_DMA_SSP;
  856. espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
  857. espi->dma_tx_data.name = "ep93xx-spi-tx";
  858. espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  859. &espi->dma_tx_data);
  860. if (!espi->dma_tx) {
  861. ret = -ENODEV;
  862. goto fail_release_rx;
  863. }
  864. return 0;
  865. fail_release_rx:
  866. dma_release_channel(espi->dma_rx);
  867. espi->dma_rx = NULL;
  868. fail_free_page:
  869. free_page((unsigned long)espi->zeropage);
  870. return ret;
  871. }
  872. static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
  873. {
  874. if (espi->dma_rx) {
  875. dma_release_channel(espi->dma_rx);
  876. sg_free_table(&espi->rx_sgt);
  877. }
  878. if (espi->dma_tx) {
  879. dma_release_channel(espi->dma_tx);
  880. sg_free_table(&espi->tx_sgt);
  881. }
  882. if (espi->zeropage)
  883. free_page((unsigned long)espi->zeropage);
  884. }
  885. static int ep93xx_spi_probe(struct platform_device *pdev)
  886. {
  887. struct spi_master *master;
  888. struct ep93xx_spi_info *info;
  889. struct ep93xx_spi *espi;
  890. struct resource *res;
  891. int irq;
  892. int error;
  893. info = pdev->dev.platform_data;
  894. master = spi_alloc_master(&pdev->dev, sizeof(*espi));
  895. if (!master) {
  896. dev_err(&pdev->dev, "failed to allocate spi master\n");
  897. return -ENOMEM;
  898. }
  899. master->setup = ep93xx_spi_setup;
  900. master->transfer = ep93xx_spi_transfer;
  901. master->cleanup = ep93xx_spi_cleanup;
  902. master->bus_num = pdev->id;
  903. master->num_chipselect = info->num_chipselect;
  904. master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
  905. platform_set_drvdata(pdev, master);
  906. espi = spi_master_get_devdata(master);
  907. espi->clk = clk_get(&pdev->dev, NULL);
  908. if (IS_ERR(espi->clk)) {
  909. dev_err(&pdev->dev, "unable to get spi clock\n");
  910. error = PTR_ERR(espi->clk);
  911. goto fail_release_master;
  912. }
  913. spin_lock_init(&espi->lock);
  914. init_completion(&espi->wait);
  915. /*
  916. * Calculate maximum and minimum supported clock rates
  917. * for the controller.
  918. */
  919. espi->max_rate = clk_get_rate(espi->clk) / 2;
  920. espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
  921. espi->pdev = pdev;
  922. irq = platform_get_irq(pdev, 0);
  923. if (irq < 0) {
  924. error = -EBUSY;
  925. dev_err(&pdev->dev, "failed to get irq resources\n");
  926. goto fail_put_clock;
  927. }
  928. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  929. if (!res) {
  930. dev_err(&pdev->dev, "unable to get iomem resource\n");
  931. error = -ENODEV;
  932. goto fail_put_clock;
  933. }
  934. espi->sspdr_phys = res->start + SSPDR;
  935. espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
  936. if (IS_ERR(espi->regs_base)) {
  937. error = PTR_ERR(espi->regs_base);
  938. goto fail_put_clock;
  939. }
  940. error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
  941. 0, "ep93xx-spi", espi);
  942. if (error) {
  943. dev_err(&pdev->dev, "failed to request irq\n");
  944. goto fail_put_clock;
  945. }
  946. if (info->use_dma && ep93xx_spi_setup_dma(espi))
  947. dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
  948. espi->wq = create_singlethread_workqueue("ep93xx_spid");
  949. if (!espi->wq) {
  950. dev_err(&pdev->dev, "unable to create workqueue\n");
  951. goto fail_free_dma;
  952. }
  953. INIT_WORK(&espi->msg_work, ep93xx_spi_work);
  954. INIT_LIST_HEAD(&espi->msg_queue);
  955. espi->running = true;
  956. /* make sure that the hardware is disabled */
  957. ep93xx_spi_write_u8(espi, SSPCR1, 0);
  958. error = spi_register_master(master);
  959. if (error) {
  960. dev_err(&pdev->dev, "failed to register SPI master\n");
  961. goto fail_free_queue;
  962. }
  963. dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
  964. (unsigned long)res->start, irq);
  965. return 0;
  966. fail_free_queue:
  967. destroy_workqueue(espi->wq);
  968. fail_free_dma:
  969. ep93xx_spi_release_dma(espi);
  970. fail_put_clock:
  971. clk_put(espi->clk);
  972. fail_release_master:
  973. spi_master_put(master);
  974. platform_set_drvdata(pdev, NULL);
  975. return error;
  976. }
/*
 * Tear down the controller: stop accepting new messages, drain the queue
 * with -ESHUTDOWN, release DMA and clock resources, then unregister the
 * master. The unregister is deliberately last: espi lives inside the
 * master's devdata, so it must not be touched after the master's last
 * reference may have been dropped.
 */
static int ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	/* Refuse new work: the worker and transfer paths check ->running. */
	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	/* Flush and destroy the worker before draining the queue. */
	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		/*
		 * Drop the lock around the completion callback: it is
		 * caller-owned code and must not run under our spinlock.
		 */
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);
	clk_put(espi->clk);
	platform_set_drvdata(pdev, NULL);

	spi_unregister_master(master);
	return 0;
}
/* Platform driver glue; matched against the "ep93xx-spi" platform device. */
static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= ep93xx_spi_remove,
};
/* Expands to module init/exit that register/unregister the driver. */
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");