/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <linux/platform_data/dma-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>

#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)
#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, driver accepts transfer requests from protocol drivers.
 * @current_msg is used to hold pointer to the message that is currently
 * processed. If @current_msg is %NULL, it means that no processing is going
 * on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
	spinlock_t			lock;
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	unsigned long			min_rate;
	unsigned long			max_rate;
	bool				running;
	struct workqueue_struct		*wq;
	struct work_struct		msg_work;
	struct completion		wait;
	struct list_head		msg_queue;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};

/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @ops: private chip operations
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	unsigned long			rate;
	u8				div_cpsr;
	u8				div_scr;
	struct ep93xx_spi_chip_ops	*ops;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)

static void ep93xx_spi_write_u8(const struct ep93xx_spi *espi,
				u16 reg, u8 value)
{
	writeb(value, espi->regs_base + reg);
}

static u8 ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return readb(spi->regs_base + reg);
}

static void ep93xx_spi_write_u16(const struct ep93xx_spi *espi,
				 u16 reg, u16 value)
{
	writew(value, espi->regs_base + reg);
}

static u16 ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return readw(spi->regs_base + reg);
}

static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
	u8 regval;
	int err;

	err = clk_enable(espi->clk);
	if (err)
		return err;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	return 0;
}

static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	clk_disable(espi->clk);
}

static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @chip: divisors are calculated for this chip
 * @rate: desired SPI output clock rate
 *
 * Function calculates cpsr (clock pre-scaler) and scr divisors based on
 * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
 * for some reason, divisors cannot be calculated nothing is stored and
 * %-EINVAL is returned.
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    struct ep93xx_spi_chip *chip,
				    unsigned long rate)
{
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that max value is between values supported by the
	 * controller. Note that minimum value is already checked in
	 * ep93xx_spi_transfer().
	 */
	rate = clamp(rate, espi->min_rate, espi->max_rate);

	/*
	 * Calculate divisors so that we can get speed according to the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be an even number and starts from 2, scr can be any number
	 * between 0 and 255.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				chip->div_scr = (u8)scr;
				chip->div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}
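
/*
 * Worked example (assuming, for illustration, a 14.7456 MHz SSP clock): for
 * a requested rate of 1 MHz the loops above settle on cpsr = 2, scr = 7,
 * since 14745600 / (2 * (7 + 1)) = 921600 Hz is the first rate that does
 * not exceed the request.
 */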

static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
	int value = (spi->mode & SPI_CS_HIGH) ? control : !control;

	if (chip->ops && chip->ops->cs_control)
		chip->ops->cs_control(spi, value);
}

/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error in
 * case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (!chip) {
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);
			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	if (spi->max_speed_hz != chip->rate) {
		int err;

		err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
		if (err != 0) {
			spi_set_ctldata(spi, NULL);
			kfree(chip);
			return err;
		}
		chip->rate = spi->max_speed_hz;
	}

	ep93xx_spi_cs_control(spi, false);
	return 0;
}
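
/*
 * Example board code (a sketch with hypothetical names, not part of this
 * driver): a chip select on an external GPIO is wired up through
 * struct ep93xx_spi_chip_ops, passed in spi_board_info.controller_data:
 *
 *	static void some_board_cs_control(struct spi_device *spi, int value)
 *	{
 *		gpio_set_value(SOME_BOARD_CS_GPIO, value);
 *	}
 *
 *	static struct ep93xx_spi_chip_ops some_board_spi_ops = {
 *		.cs_control	= some_board_cs_control,
 *	};
 *
 * The optional .setup and .cleanup hooks (called from ep93xx_spi_setup()
 * above and ep93xx_spi_cleanup() below) would typically request and free
 * the GPIO.
 */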

/**
 * ep93xx_spi_transfer() - queue message to be transferred
 * @spi: target SPI device
 * @msg: message to be transferred
 *
 * This function is called by SPI device drivers when they are going to transfer
 * a new message. It simply puts the message in the queue and schedules
 * workqueue to perform the actual transfer later on.
 *
 * Returns %0 on success and negative error in case of failure.
 */
static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct spi_transfer *t;
	unsigned long flags;

	if (!msg || !msg->complete)
		return -EINVAL;

	/* first validate each transfer */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (t->speed_hz && t->speed_hz < espi->min_rate)
			return -EINVAL;
	}

	/*
	 * Now that we own the message, let's initialize it so that it is
	 * suitable for us. We use @msg->status to signal whether there was
	 * error in transfer and @msg->state is used to hold pointer to the
	 * current transfer (or %NULL if no active current transfer).
	 */
	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	spin_lock_irqsave(&espi->lock, flags);
	if (!espi->running) {
		spin_unlock_irqrestore(&espi->lock, flags);
		return -ESHUTDOWN;
	}
	list_add_tail(&msg->queue, &espi->msg_queue);
	queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irqrestore(&espi->lock, flags);

	return 0;
}
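
/*
 * For reference, a minimal sketch (illustrative names only) of how a
 * protocol driver ends up in ep93xx_spi_transfer(): it builds a message and
 * submits it with spi_async(), and the SPI core calls master->transfer():
 *
 *	struct spi_transfer t = {
 *		.tx_buf	= tx,
 *		.len	= sizeof(tx),
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	m.complete = my_complete;
 *	m.context = &my_done_completion;
 *	ret = spi_async(spi, &m);
 *
 * Note that @msg->complete must be set, as checked above.
 */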

/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * This function releases master controller specific state for given @spi
 * device.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (chip) {
		if (chip->ops && chip->ops->cleanup)
			chip->ops->cleanup(spi);
		spi_set_ctldata(spi, NULL);
		kfree(chip);
	}
}

/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 * @bits_per_word: transfer bits_per_word
 *
 * This function sets up the actual hardware registers with settings given in
 * @chip. Note that no validation is done so make sure that callers validate
 * settings before calling this.
 */
static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
				  const struct ep93xx_spi_chip *chip,
				  u8 bits_per_word)
{
	u8 dss = bits_per_word_to_dss(bits_per_word);
	u16 cr0;

	cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= dss;

	dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		chip->spi->mode, chip->div_cpsr, chip->div_scr, dss);
	dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);

	ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
	ep93xx_spi_write_u16(espi, SSPCR0, cr0);
}
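
/*
 * Example cr0 value for ep93xx_spi_chip_setup(): SPI mode 3
 * (SPI_CPOL | SPI_CPHA = 3), 8 bits per word (dss = 7) and div_scr = 7
 * give cr0 = (7 << 8) | (3 << 6) | 7 = 0x7c7.
 */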

static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (t->bits_per_word > 8) {
		u16 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u16 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u16(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	} else {
		u8 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u8 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u8(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	}
}

static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (t->bits_per_word > 8) {
		u16 rx_val;

		rx_val = ep93xx_spi_read_u16(espi, SSPDR);
		if (t->rx_buf)
			((u16 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	} else {
		u8 rx_val;

		rx_val = ep93xx_spi_read_u8(espi, SSPDR);
		if (t->rx_buf)
			((u8 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should be
 * full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}

static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
{
	/*
	 * Now everything is set up for the current transfer. We prime the TX
	 * FIFO, enable interrupts, and wait for the transfer to complete.
	 */
	if (ep93xx_spi_read_write(espi)) {
		ep93xx_spi_enable_interrupts(espi);
		wait_for_completion(&espi->wait);
	}
}

/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	if (t->bits_per_word > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */
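	/*
	 * For example, with 4 KiB pages a 10000 byte transfer is split into
	 * three entries of 4096, 4096 and 1808 bytes.
	 */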
	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}

/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function finishes with the DMA transfer. After this, the DMA buffer is
 * unmapped.
 */
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
				  enum dma_transfer_direction dir)
{
	struct dma_chan *chan;
	struct sg_table *sgt;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}

static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
	if (IS_ERR(rxd)) {
		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		msg->status = PTR_ERR(rxd);
		return;
	}

	txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		msg->status = PTR_ERR(txd);
		return;
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = &espi->wait;

	/* Now submit both descriptors and wait while they finish */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	wait_for_completion(&espi->wait);

	ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
	ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
}

/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
	int err;

	msg->state = t;

	err = ep93xx_spi_calc_divisors(espi, chip, t->speed_hz);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to adjust speed\n");
		msg->status = err;
		return;
	}

	ep93xx_spi_chip_setup(espi, chip, t->bits_per_word);

	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point in setting up DMA for transfers which will fit
	 * into the FIFO and can be transferred with a single interrupt. So
	 * in these cases we will be using PIO and don't bother with DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}
}

/**
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case of
 * failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Assert the chipselect.
	 */
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}

#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. Message is taken out from the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After message is transferred, protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to negative error
 * number, otherwise it contains zero (and @msg->actual_length is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	spin_lock_irq(&espi->lock);
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}

static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}

static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}

static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}

static int ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int irq;
	int error;

	info = pdev->dev.platform_data;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resources\n");
		return -EBUSY;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master)
		return -ENOMEM;

	master->setup = ep93xx_spi_setup;
	master->transfer = ep93xx_spi_transfer;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	spin_lock_init(&espi->lock);
	init_completion(&espi->wait);

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
	espi->max_rate = clk_get_rate(espi->clk) / 2;
	espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
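	/*
	 * The extremes above follow from ep93xx_spi_calc_divisors():
	 * cpsr = 2, scr = 0 gives the maximum rate and cpsr = 254,
	 * scr = 255 the minimum.
	 */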
	espi->pdev = pdev;

	espi->sspdr_phys = res->start + SSPDR;

	espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(espi->regs_base)) {
		error = PTR_ERR(espi->regs_base);
		goto fail_put_clock;
	}

	error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
				 0, "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_put_clock;
	}

	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	espi->wq = create_singlethread_workqueue("ep93xx_spid");
	if (!espi->wq) {
		dev_err(&pdev->dev, "unable to create workqueue\n");
		error = -ENOMEM;
		goto fail_free_dma;
	}
	INIT_WORK(&espi->msg_work, ep93xx_spi_work);
	INIT_LIST_HEAD(&espi->msg_queue);
	espi->running = true;

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = spi_register_master(master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_queue;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, irq);

	return 0;

fail_free_queue:
	destroy_workqueue(espi->wq);
fail_free_dma:
	ep93xx_spi_release_dma(espi);
fail_put_clock:
	clk_put(espi->clk);
fail_release_master:
	spi_master_put(master);

	return error;
}

static int ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);
	clk_put(espi->clk);

	spi_unregister_master(master);
	return 0;
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");