  1. /*
  2. * Driver for Cirrus Logic EP93xx SPI controller.
  3. *
  4. * Copyright (C) 2010-2011 Mika Westerberg
  5. *
  6. * Explicit FIFO handling code was inspired by amba-pl022 driver.
  7. *
  8. * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
  9. *
  10. * For more information about the SPI controller see documentation on Cirrus
  11. * Logic web site:
  12. * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License version 2 as
  16. * published by the Free Software Foundation.
  17. */
  18. #include <linux/io.h>
  19. #include <linux/clk.h>
  20. #include <linux/err.h>
  21. #include <linux/delay.h>
  22. #include <linux/device.h>
  23. #include <linux/dmaengine.h>
  24. #include <linux/bitops.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/platform_device.h>
  27. #include <linux/workqueue.h>
  28. #include <linux/sched.h>
  29. #include <linux/scatterlist.h>
  30. #include <linux/spi/spi.h>
  31. #include <mach/dma.h>
  32. #include <mach/ep93xx_spi.h>
  33. #define SSPCR0 0x0000
  34. #define SSPCR0_MODE_SHIFT 6
  35. #define SSPCR0_SCR_SHIFT 8
  36. #define SSPCR1 0x0004
  37. #define SSPCR1_RIE BIT(0)
  38. #define SSPCR1_TIE BIT(1)
  39. #define SSPCR1_RORIE BIT(2)
  40. #define SSPCR1_LBM BIT(3)
  41. #define SSPCR1_SSE BIT(4)
  42. #define SSPCR1_MS BIT(5)
  43. #define SSPCR1_SOD BIT(6)
  44. #define SSPDR 0x0008
  45. #define SSPSR 0x000c
  46. #define SSPSR_TFE BIT(0)
  47. #define SSPSR_TNF BIT(1)
  48. #define SSPSR_RNE BIT(2)
  49. #define SSPSR_RFF BIT(3)
  50. #define SSPSR_BSY BIT(4)
  51. #define SSPCPSR 0x0010
  52. #define SSPIIR 0x0014
  53. #define SSPIIR_RIS BIT(0)
  54. #define SSPIIR_TIS BIT(1)
  55. #define SSPIIR_RORIS BIT(2)
  56. #define SSPICR SSPIIR
  57. /* timeout in milliseconds */
  58. #define SPI_TIMEOUT 5
  59. /* maximum depth of RX/TX FIFO */
  60. #define SPI_FIFO_SIZE 8
  61. /**
  62. * struct ep93xx_spi - EP93xx SPI controller structure
  63. * @lock: spinlock that protects concurrent accesses to fields @running,
  64. * @current_msg and @msg_queue
  65. * @pdev: pointer to platform device
  66. * @clk: clock for the controller
  67. * @regs_base: pointer to ioremap()'d registers
  68. * @sspdr_phys: physical address of the SSPDR register
  69. * @irq: IRQ number used by the driver
  70. * @min_rate: minimum clock rate (in Hz) supported by the controller
  71. * @max_rate: maximum clock rate (in Hz) supported by the controller
  72. * @running: is the queue running
  73. * @wq: workqueue used by the driver
  74. * @msg_work: work that is queued for the driver
  75. * @wait: wait here until given transfer is completed
  76. * @msg_queue: queue for the messages
  77. * @current_msg: message that is currently processed (or %NULL if none)
  78. * @tx: current byte in transfer to transmit
  79. * @rx: current byte in transfer to receive
  80. * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
  81. * frame decreases this level and sending one frame increases it.
  82. * @dma_rx: RX DMA channel
  83. * @dma_tx: TX DMA channel
  84. * @dma_rx_data: RX parameters passed to the DMA engine
  85. * @dma_tx_data: TX parameters passed to the DMA engine
  86. * @rx_sgt: sg table for RX transfers
  87. * @tx_sgt: sg table for TX transfers
  88. * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
  89. * the client
  90. *
  91. * This structure holds EP93xx SPI controller specific information. When
  92. * @running is %true, driver accepts transfer requests from protocol drivers.
  93. * @current_msg is used to hold pointer to the message that is currently
  94. * processed. If @current_msg is %NULL, it means that no processing is going
  95. * on.
  96. *
  97. * Most of the fields are only written once and they can be accessed without
  98. * taking the @lock. Fields that are accessed concurrently are: @current_msg,
  99. * @running, and @msg_queue.
  100. */
  101. struct ep93xx_spi {
  102. spinlock_t lock;
  103. const struct platform_device *pdev;
  104. struct clk *clk;
  105. void __iomem *regs_base;
  106. unsigned long sspdr_phys;
  107. int irq;
  108. unsigned long min_rate;
  109. unsigned long max_rate;
  110. bool running;
  111. struct workqueue_struct *wq;
  112. struct work_struct msg_work;
  113. struct completion wait;
  114. struct list_head msg_queue;
  115. struct spi_message *current_msg;
  116. size_t tx;
  117. size_t rx;
  118. size_t fifo_level;
  119. struct dma_chan *dma_rx;
  120. struct dma_chan *dma_tx;
  121. struct ep93xx_dma_data dma_rx_data;
  122. struct ep93xx_dma_data dma_tx_data;
  123. struct sg_table rx_sgt;
  124. struct sg_table tx_sgt;
  125. void *zeropage;
  126. };
/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @dss: bits per word (4 - 16 bits)
 * @ops: private chip operations (setup/cleanup/cs_control callbacks supplied
 *       via spi->controller_data; any of them may be %NULL)
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	unsigned long			rate;
	u8				div_cpsr;
	u8				div_scr;
	u8				dss;
	struct ep93xx_spi_chip_ops	*ops;
};
  148. /* converts bits per word to CR0.DSS value */
  149. #define bits_per_word_to_dss(bpw) ((bpw) - 1)
/* Write an 8-bit value to controller register @reg. */
static inline void
ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
{
	__raw_writeb(value, espi->regs_base + reg);
}
/* Read an 8-bit value from controller register @reg. */
static inline u8
ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readb(spi->regs_base + reg);
}
/* Write a 16-bit value to controller register @reg. */
static inline void
ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
{
	__raw_writew(value, espi->regs_base + reg);
}
/* Read a 16-bit value from controller register @reg. */
static inline u16
ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readw(spi->regs_base + reg);
}
  170. static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
  171. {
  172. u8 regval;
  173. int err;
  174. err = clk_enable(espi->clk);
  175. if (err)
  176. return err;
  177. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  178. regval |= SSPCR1_SSE;
  179. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  180. return 0;
  181. }
  182. static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
  183. {
  184. u8 regval;
  185. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  186. regval &= ~SSPCR1_SSE;
  187. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  188. clk_disable(espi->clk);
  189. }
  190. static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
  191. {
  192. u8 regval;
  193. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  194. regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  195. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  196. }
  197. static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
  198. {
  199. u8 regval;
  200. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  201. regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  202. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  203. }
  204. /**
  205. * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
  206. * @espi: ep93xx SPI controller struct
  207. * @chip: divisors are calculated for this chip
  208. * @rate: desired SPI output clock rate
  209. *
  210. * Function calculates cpsr (clock pre-scaler) and scr divisors based on
  211. * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
  212. * for some reason, divisors cannot be calculated nothing is stored and
  213. * %-EINVAL is returned.
  214. */
  215. static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
  216. struct ep93xx_spi_chip *chip,
  217. unsigned long rate)
  218. {
  219. unsigned long spi_clk_rate = clk_get_rate(espi->clk);
  220. int cpsr, scr;
  221. /*
  222. * Make sure that max value is between values supported by the
  223. * controller. Note that minimum value is already checked in
  224. * ep93xx_spi_transfer().
  225. */
  226. rate = clamp(rate, espi->min_rate, espi->max_rate);
  227. /*
  228. * Calculate divisors so that we can get speed according the
  229. * following formula:
  230. * rate = spi_clock_rate / (cpsr * (1 + scr))
  231. *
  232. * cpsr must be even number and starts from 2, scr can be any number
  233. * between 0 and 255.
  234. */
  235. for (cpsr = 2; cpsr <= 254; cpsr += 2) {
  236. for (scr = 0; scr <= 255; scr++) {
  237. if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
  238. chip->div_scr = (u8)scr;
  239. chip->div_cpsr = (u8)cpsr;
  240. return 0;
  241. }
  242. }
  243. }
  244. return -EINVAL;
  245. }
  246. static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
  247. {
  248. struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
  249. int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
  250. if (chip->ops && chip->ops->cs_control)
  251. chip->ops->cs_control(spi, value);
  252. }
  253. /**
  254. * ep93xx_spi_setup() - setup an SPI device
  255. * @spi: SPI device to setup
  256. *
  257. * This function sets up SPI device mode, speed etc. Can be called multiple
  258. * times for a single device. Returns %0 in case of success, negative error in
  259. * case of failure. When this function returns success, the device is
  260. * deselected.
  261. */
  262. static int ep93xx_spi_setup(struct spi_device *spi)
  263. {
  264. struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
  265. struct ep93xx_spi_chip *chip;
  266. if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
  267. dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
  268. spi->bits_per_word);
  269. return -EINVAL;
  270. }
  271. chip = spi_get_ctldata(spi);
  272. if (!chip) {
  273. dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
  274. spi->modalias);
  275. chip = kzalloc(sizeof(*chip), GFP_KERNEL);
  276. if (!chip)
  277. return -ENOMEM;
  278. chip->spi = spi;
  279. chip->ops = spi->controller_data;
  280. if (chip->ops && chip->ops->setup) {
  281. int ret = chip->ops->setup(spi);
  282. if (ret) {
  283. kfree(chip);
  284. return ret;
  285. }
  286. }
  287. spi_set_ctldata(spi, chip);
  288. }
  289. if (spi->max_speed_hz != chip->rate) {
  290. int err;
  291. err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
  292. if (err != 0) {
  293. spi_set_ctldata(spi, NULL);
  294. kfree(chip);
  295. return err;
  296. }
  297. chip->rate = spi->max_speed_hz;
  298. }
  299. chip->dss = bits_per_word_to_dss(spi->bits_per_word);
  300. ep93xx_spi_cs_control(spi, false);
  301. return 0;
  302. }
  303. /**
  304. * ep93xx_spi_transfer() - queue message to be transferred
  305. * @spi: target SPI device
  306. * @msg: message to be transferred
  307. *
  308. * This function is called by SPI device drivers when they are going to transfer
  309. * a new message. It simply puts the message in the queue and schedules
  310. * workqueue to perform the actual transfer later on.
  311. *
  312. * Returns %0 on success and negative error in case of failure.
  313. */
  314. static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
  315. {
  316. struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
  317. struct spi_transfer *t;
  318. unsigned long flags;
  319. if (!msg || !msg->complete)
  320. return -EINVAL;
  321. /* first validate each transfer */
  322. list_for_each_entry(t, &msg->transfers, transfer_list) {
  323. if (t->bits_per_word) {
  324. if (t->bits_per_word < 4 || t->bits_per_word > 16)
  325. return -EINVAL;
  326. }
  327. if (t->speed_hz && t->speed_hz < espi->min_rate)
  328. return -EINVAL;
  329. }
  330. /*
  331. * Now that we own the message, let's initialize it so that it is
  332. * suitable for us. We use @msg->status to signal whether there was
  333. * error in transfer and @msg->state is used to hold pointer to the
  334. * current transfer (or %NULL if no active current transfer).
  335. */
  336. msg->state = NULL;
  337. msg->status = 0;
  338. msg->actual_length = 0;
  339. spin_lock_irqsave(&espi->lock, flags);
  340. if (!espi->running) {
  341. spin_unlock_irqrestore(&espi->lock, flags);
  342. return -ESHUTDOWN;
  343. }
  344. list_add_tail(&msg->queue, &espi->msg_queue);
  345. queue_work(espi->wq, &espi->msg_work);
  346. spin_unlock_irqrestore(&espi->lock, flags);
  347. return 0;
  348. }
  349. /**
  350. * ep93xx_spi_cleanup() - cleans up master controller specific state
  351. * @spi: SPI device to cleanup
  352. *
  353. * This function releases master controller specific state for given @spi
  354. * device.
  355. */
  356. static void ep93xx_spi_cleanup(struct spi_device *spi)
  357. {
  358. struct ep93xx_spi_chip *chip;
  359. chip = spi_get_ctldata(spi);
  360. if (chip) {
  361. if (chip->ops && chip->ops->cleanup)
  362. chip->ops->cleanup(spi);
  363. spi_set_ctldata(spi, NULL);
  364. kfree(chip);
  365. }
  366. }
  367. /**
  368. * ep93xx_spi_chip_setup() - configures hardware according to given @chip
  369. * @espi: ep93xx SPI controller struct
  370. * @chip: chip specific settings
  371. *
  372. * This function sets up the actual hardware registers with settings given in
  373. * @chip. Note that no validation is done so make sure that callers validate
  374. * settings before calling this.
  375. */
  376. static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
  377. const struct ep93xx_spi_chip *chip)
  378. {
  379. u16 cr0;
  380. cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
  381. cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
  382. cr0 |= chip->dss;
  383. dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
  384. chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
  385. dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
  386. ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
  387. ep93xx_spi_write_u16(espi, SSPCR0, cr0);
  388. }
  389. static inline int bits_per_word(const struct ep93xx_spi *espi)
  390. {
  391. struct spi_message *msg = espi->current_msg;
  392. struct spi_transfer *t = msg->state;
  393. return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
  394. }
  395. static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
  396. {
  397. if (bits_per_word(espi) > 8) {
  398. u16 tx_val = 0;
  399. if (t->tx_buf)
  400. tx_val = ((u16 *)t->tx_buf)[espi->tx];
  401. ep93xx_spi_write_u16(espi, SSPDR, tx_val);
  402. espi->tx += sizeof(tx_val);
  403. } else {
  404. u8 tx_val = 0;
  405. if (t->tx_buf)
  406. tx_val = ((u8 *)t->tx_buf)[espi->tx];
  407. ep93xx_spi_write_u8(espi, SSPDR, tx_val);
  408. espi->tx += sizeof(tx_val);
  409. }
  410. }
  411. static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
  412. {
  413. if (bits_per_word(espi) > 8) {
  414. u16 rx_val;
  415. rx_val = ep93xx_spi_read_u16(espi, SSPDR);
  416. if (t->rx_buf)
  417. ((u16 *)t->rx_buf)[espi->rx] = rx_val;
  418. espi->rx += sizeof(rx_val);
  419. } else {
  420. u8 rx_val;
  421. rx_val = ep93xx_spi_read_u8(espi, SSPDR);
  422. if (t->rx_buf)
  423. ((u8 *)t->rx_buf)[espi->rx] = rx_val;
  424. espi->rx += sizeof(rx_val);
  425. }
  426. }
  427. /**
  428. * ep93xx_spi_read_write() - perform next RX/TX transfer
  429. * @espi: ep93xx SPI controller struct
  430. *
  431. * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
  432. * called several times, the whole transfer will be completed. Returns
  433. * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
  434. *
  435. * When this function is finished, RX FIFO should be empty and TX FIFO should be
  436. * full.
  437. */
  438. static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
  439. {
  440. struct spi_message *msg = espi->current_msg;
  441. struct spi_transfer *t = msg->state;
  442. /* read as long as RX FIFO has frames in it */
  443. while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
  444. ep93xx_do_read(espi, t);
  445. espi->fifo_level--;
  446. }
  447. /* write as long as TX FIFO has room */
  448. while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
  449. ep93xx_do_write(espi, t);
  450. espi->fifo_level++;
  451. }
  452. if (espi->rx == t->len)
  453. return 0;
  454. return -EINPROGRESS;
  455. }
  456. static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
  457. {
  458. /*
  459. * Now everything is set up for the current transfer. We prime the TX
  460. * FIFO, enable interrupts, and wait for the transfer to complete.
  461. */
  462. if (ep93xx_spi_read_write(espi)) {
  463. ep93xx_spi_enable_interrupts(espi);
  464. wait_for_completion(&espi->wait);
  465. }
  466. }
  467. /**
  468. * ep93xx_spi_dma_prepare() - prepares a DMA transfer
  469. * @espi: ep93xx SPI controller struct
  470. * @dir: DMA transfer direction
  471. *
  472. * Function configures the DMA, maps the buffer and prepares the DMA
  473. * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
  474. * in case of failure.
  475. */
  476. static struct dma_async_tx_descriptor *
  477. ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
  478. {
  479. struct spi_transfer *t = espi->current_msg->state;
  480. struct dma_async_tx_descriptor *txd;
  481. enum dma_slave_buswidth buswidth;
  482. struct dma_slave_config conf;
  483. struct scatterlist *sg;
  484. struct sg_table *sgt;
  485. struct dma_chan *chan;
  486. const void *buf, *pbuf;
  487. size_t len = t->len;
  488. int i, ret, nents;
  489. if (bits_per_word(espi) > 8)
  490. buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
  491. else
  492. buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
  493. memset(&conf, 0, sizeof(conf));
  494. conf.direction = dir;
  495. if (dir == DMA_FROM_DEVICE) {
  496. chan = espi->dma_rx;
  497. buf = t->rx_buf;
  498. sgt = &espi->rx_sgt;
  499. conf.src_addr = espi->sspdr_phys;
  500. conf.src_addr_width = buswidth;
  501. } else {
  502. chan = espi->dma_tx;
  503. buf = t->tx_buf;
  504. sgt = &espi->tx_sgt;
  505. conf.dst_addr = espi->sspdr_phys;
  506. conf.dst_addr_width = buswidth;
  507. }
  508. ret = dmaengine_slave_config(chan, &conf);
  509. if (ret)
  510. return ERR_PTR(ret);
  511. /*
  512. * We need to split the transfer into PAGE_SIZE'd chunks. This is
  513. * because we are using @espi->zeropage to provide a zero RX buffer
  514. * for the TX transfers and we have only allocated one page for that.
  515. *
  516. * For performance reasons we allocate a new sg_table only when
  517. * needed. Otherwise we will re-use the current one. Eventually the
  518. * last sg_table is released in ep93xx_spi_release_dma().
  519. */
  520. nents = DIV_ROUND_UP(len, PAGE_SIZE);
  521. if (nents != sgt->nents) {
  522. sg_free_table(sgt);
  523. ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
  524. if (ret)
  525. return ERR_PTR(ret);
  526. }
  527. pbuf = buf;
  528. for_each_sg(sgt->sgl, sg, sgt->nents, i) {
  529. size_t bytes = min_t(size_t, len, PAGE_SIZE);
  530. if (buf) {
  531. sg_set_page(sg, virt_to_page(pbuf), bytes,
  532. offset_in_page(pbuf));
  533. } else {
  534. sg_set_page(sg, virt_to_page(espi->zeropage),
  535. bytes, 0);
  536. }
  537. pbuf += bytes;
  538. len -= bytes;
  539. }
  540. if (WARN_ON(len)) {
  541. dev_warn(&espi->pdev->dev, "len = %d expected 0!", len);
  542. return ERR_PTR(-EINVAL);
  543. }
  544. nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  545. if (!nents)
  546. return ERR_PTR(-ENOMEM);
  547. txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
  548. dir, DMA_CTRL_ACK);
  549. if (!txd) {
  550. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  551. return ERR_PTR(-ENOMEM);
  552. }
  553. return txd;
  554. }
  555. /**
  556. * ep93xx_spi_dma_finish() - finishes with a DMA transfer
  557. * @espi: ep93xx SPI controller struct
  558. * @dir: DMA transfer direction
  559. *
  560. * Function finishes with the DMA transfer. After this, the DMA buffer is
  561. * unmapped.
  562. */
  563. static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
  564. enum dma_data_direction dir)
  565. {
  566. struct dma_chan *chan;
  567. struct sg_table *sgt;
  568. if (dir == DMA_FROM_DEVICE) {
  569. chan = espi->dma_rx;
  570. sgt = &espi->rx_sgt;
  571. } else {
  572. chan = espi->dma_tx;
  573. sgt = &espi->tx_sgt;
  574. }
  575. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  576. }
/*
 * DMA completion callback. @callback_param is &espi->wait (set up in
 * ep93xx_spi_dma_transfer()); wake the thread sleeping on it.
 */
static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}
  581. static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
  582. {
  583. struct spi_message *msg = espi->current_msg;
  584. struct dma_async_tx_descriptor *rxd, *txd;
  585. rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
  586. if (IS_ERR(rxd)) {
  587. dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
  588. msg->status = PTR_ERR(rxd);
  589. return;
  590. }
  591. txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
  592. if (IS_ERR(txd)) {
  593. ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
  594. dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
  595. msg->status = PTR_ERR(txd);
  596. return;
  597. }
  598. /* We are ready when RX is done */
  599. rxd->callback = ep93xx_spi_dma_callback;
  600. rxd->callback_param = &espi->wait;
  601. /* Now submit both descriptors and wait while they finish */
  602. dmaengine_submit(rxd);
  603. dmaengine_submit(txd);
  604. dma_async_issue_pending(espi->dma_rx);
  605. dma_async_issue_pending(espi->dma_tx);
  606. wait_for_completion(&espi->wait);
  607. ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
  608. ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
  609. }
/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	/* @msg->state tracks the transfer in flight (read by the IRQ path). */
	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore original later when
	 * the transfer is finished. The stack copy of *chip keeps the
	 * persistent per-device settings untouched.
	 */
	if (t->speed_hz || t->bits_per_word) {
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	/* Reset byte counters before the FIFO/DMA machinery runs. */
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point of setting up DMA for the transfers which will
	 * fit into the FIFO and can be transferred with a single interrupt.
	 * So in these cases we will be using PIO and don't bother for DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler to handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}

	/* Restore the persistent chip settings if this transfer overrode them. */
	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}
  692. /*
  693. * ep93xx_spi_process_message() - process one SPI message
  694. * @espi: ep93xx SPI controller struct
  695. * @msg: message to process
  696. *
  697. * This function processes a single SPI message. We go through all transfers in
  698. * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
  699. * asserted during the whole message (unless per transfer cs_change is set).
  700. *
  701. * @msg->status contains %0 in case of success or negative error code in case of
  702. * failure.
  703. */
  704. static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
  705. struct spi_message *msg)
  706. {
  707. unsigned long timeout;
  708. struct spi_transfer *t;
  709. int err;
  710. /*
  711. * Enable the SPI controller and its clock.
  712. */
  713. err = ep93xx_spi_enable(espi);
  714. if (err) {
  715. dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
  716. msg->status = err;
  717. return;
  718. }
  719. /*
  720. * Just to be sure: flush any data from RX FIFO.
  721. */
  722. timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
  723. while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
  724. if (time_after(jiffies, timeout)) {
  725. dev_warn(&espi->pdev->dev,
  726. "timeout while flushing RX FIFO\n");
  727. msg->status = -ETIMEDOUT;
  728. return;
  729. }
  730. ep93xx_spi_read_u16(espi, SSPDR);
  731. }
  732. /*
  733. * We explicitly handle FIFO level. This way we don't have to check TX
  734. * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
  735. */
  736. espi->fifo_level = 0;
  737. /*
  738. * Update SPI controller registers according to spi device and assert
  739. * the chipselect.
  740. */
  741. ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
  742. ep93xx_spi_cs_control(msg->spi, true);
  743. list_for_each_entry(t, &msg->transfers, transfer_list) {
  744. ep93xx_spi_process_transfer(espi, msg, t);
  745. if (msg->status)
  746. break;
  747. }
  748. /*
  749. * Now the whole message is transferred (or failed for some reason). We
  750. * deselect the device and disable the SPI controller.
  751. */
  752. ep93xx_spi_cs_control(msg->spi, false);
  753. ep93xx_spi_disable(espi);
  754. }
  755. #define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))
/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. Message is taken out from the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After message is transferred, protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to negative error
 * number, otherwise it contains zero (and @msg->actual_length is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	/*
	 * Dequeue under the lock: bail out if the queue is stopped, a
	 * message is already in flight, or there is nothing to do.
	 */
	spin_lock_irq(&espi->lock);
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	/* May sleep: the lock is dropped while the message is processed. */
	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}
/**
 * ep93xx_spi_interrupt() - SSP interrupt handler
 * @irq: interrupt number (unused)
 * @dev_id: pointer to the ep93xx_spi controller struct
 *
 * Handles receive-overrun, RX and TX interrupts. On overrun or on
 * completion of the current transfer the SSP interrupts are masked and
 * the waiting worker is woken via @espi->wait.
 */
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		/*
		 * NOTE(review): assumes espi->current_msg is non-NULL here,
		 * i.e. SSP interrupts are only enabled while a message is in
		 * flight — confirm against the interrupt enable call sites.
		 */
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}
  832. static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
  833. {
  834. if (ep93xx_dma_chan_is_m2p(chan))
  835. return false;
  836. chan->private = filter_param;
  837. return true;
  838. }
  839. static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
  840. {
  841. dma_cap_mask_t mask;
  842. int ret;
  843. espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
  844. if (!espi->zeropage)
  845. return -ENOMEM;
  846. dma_cap_zero(mask);
  847. dma_cap_set(DMA_SLAVE, mask);
  848. espi->dma_rx_data.port = EP93XX_DMA_SSP;
  849. espi->dma_rx_data.direction = DMA_FROM_DEVICE;
  850. espi->dma_rx_data.name = "ep93xx-spi-rx";
  851. espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  852. &espi->dma_rx_data);
  853. if (!espi->dma_rx) {
  854. ret = -ENODEV;
  855. goto fail_free_page;
  856. }
  857. espi->dma_tx_data.port = EP93XX_DMA_SSP;
  858. espi->dma_tx_data.direction = DMA_TO_DEVICE;
  859. espi->dma_tx_data.name = "ep93xx-spi-tx";
  860. espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  861. &espi->dma_tx_data);
  862. if (!espi->dma_tx) {
  863. ret = -ENODEV;
  864. goto fail_release_rx;
  865. }
  866. return 0;
  867. fail_release_rx:
  868. dma_release_channel(espi->dma_rx);
  869. espi->dma_rx = NULL;
  870. fail_free_page:
  871. free_page((unsigned long)espi->zeropage);
  872. return ret;
  873. }
  874. static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
  875. {
  876. if (espi->dma_rx) {
  877. dma_release_channel(espi->dma_rx);
  878. sg_free_table(&espi->rx_sgt);
  879. }
  880. if (espi->dma_tx) {
  881. dma_release_channel(espi->dma_tx);
  882. sg_free_table(&espi->tx_sgt);
  883. }
  884. if (espi->zeropage)
  885. free_page((unsigned long)espi->zeropage);
  886. }
  887. static int __init ep93xx_spi_probe(struct platform_device *pdev)
  888. {
  889. struct spi_master *master;
  890. struct ep93xx_spi_info *info;
  891. struct ep93xx_spi *espi;
  892. struct resource *res;
  893. int error;
  894. info = pdev->dev.platform_data;
  895. master = spi_alloc_master(&pdev->dev, sizeof(*espi));
  896. if (!master) {
  897. dev_err(&pdev->dev, "failed to allocate spi master\n");
  898. return -ENOMEM;
  899. }
  900. master->setup = ep93xx_spi_setup;
  901. master->transfer = ep93xx_spi_transfer;
  902. master->cleanup = ep93xx_spi_cleanup;
  903. master->bus_num = pdev->id;
  904. master->num_chipselect = info->num_chipselect;
  905. master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
  906. platform_set_drvdata(pdev, master);
  907. espi = spi_master_get_devdata(master);
  908. espi->clk = clk_get(&pdev->dev, NULL);
  909. if (IS_ERR(espi->clk)) {
  910. dev_err(&pdev->dev, "unable to get spi clock\n");
  911. error = PTR_ERR(espi->clk);
  912. goto fail_release_master;
  913. }
  914. spin_lock_init(&espi->lock);
  915. init_completion(&espi->wait);
  916. /*
  917. * Calculate maximum and minimum supported clock rates
  918. * for the controller.
  919. */
  920. espi->max_rate = clk_get_rate(espi->clk) / 2;
  921. espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
  922. espi->pdev = pdev;
  923. espi->irq = platform_get_irq(pdev, 0);
  924. if (espi->irq < 0) {
  925. error = -EBUSY;
  926. dev_err(&pdev->dev, "failed to get irq resources\n");
  927. goto fail_put_clock;
  928. }
  929. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  930. if (!res) {
  931. dev_err(&pdev->dev, "unable to get iomem resource\n");
  932. error = -ENODEV;
  933. goto fail_put_clock;
  934. }
  935. res = request_mem_region(res->start, resource_size(res), pdev->name);
  936. if (!res) {
  937. dev_err(&pdev->dev, "unable to request iomem resources\n");
  938. error = -EBUSY;
  939. goto fail_put_clock;
  940. }
  941. espi->sspdr_phys = res->start + SSPDR;
  942. espi->regs_base = ioremap(res->start, resource_size(res));
  943. if (!espi->regs_base) {
  944. dev_err(&pdev->dev, "failed to map resources\n");
  945. error = -ENODEV;
  946. goto fail_free_mem;
  947. }
  948. error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
  949. "ep93xx-spi", espi);
  950. if (error) {
  951. dev_err(&pdev->dev, "failed to request irq\n");
  952. goto fail_unmap_regs;
  953. }
  954. if (info->use_dma && ep93xx_spi_setup_dma(espi))
  955. dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
  956. espi->wq = create_singlethread_workqueue("ep93xx_spid");
  957. if (!espi->wq) {
  958. dev_err(&pdev->dev, "unable to create workqueue\n");
  959. goto fail_free_dma;
  960. }
  961. INIT_WORK(&espi->msg_work, ep93xx_spi_work);
  962. INIT_LIST_HEAD(&espi->msg_queue);
  963. espi->running = true;
  964. /* make sure that the hardware is disabled */
  965. ep93xx_spi_write_u8(espi, SSPCR1, 0);
  966. error = spi_register_master(master);
  967. if (error) {
  968. dev_err(&pdev->dev, "failed to register SPI master\n");
  969. goto fail_free_queue;
  970. }
  971. dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
  972. (unsigned long)res->start, espi->irq);
  973. return 0;
  974. fail_free_queue:
  975. destroy_workqueue(espi->wq);
  976. fail_free_dma:
  977. ep93xx_spi_release_dma(espi);
  978. free_irq(espi->irq, espi);
  979. fail_unmap_regs:
  980. iounmap(espi->regs_base);
  981. fail_free_mem:
  982. release_mem_region(res->start, resource_size(res));
  983. fail_put_clock:
  984. clk_put(espi->clk);
  985. fail_release_master:
  986. spi_master_put(master);
  987. platform_set_drvdata(pdev, NULL);
  988. return error;
  989. }
/**
 * ep93xx_spi_remove() - unbind the driver from the platform device
 * @pdev: the platform device
 *
 * Stops the message queue, completes any still-queued messages with
 * %-ESHUTDOWN, and releases all resources acquired in probe.
 */
static int __exit ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct resource *res;

	/*
	 * Clear 'running' under the lock so that neither the transfer hook
	 * nor the worker will queue/start new work from now on.
	 */
	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	/* Flushes pending work; the worker cannot run after this. */
	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		/*
		 * Drop the lock around the callback: the protocol driver's
		 * complete() runs outside our lock, then re-take it to
		 * examine the (possibly changed) queue.
		 */
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);
	free_irq(espi->irq, espi);
	iounmap(espi->regs_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	clk_put(espi->clk);
	platform_set_drvdata(pdev, NULL);

	spi_unregister_master(master);
	return 0;
}
/*
 * No .probe member here: the driver is registered with
 * platform_driver_probe() below, which takes the (__init) probe routine
 * directly; .remove is wrapped in __exit_p() to match.
 */
static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.remove		= __exit_p(ep93xx_spi_remove),
};
static int __init ep93xx_spi_init(void)
{
	/*
	 * platform_driver_probe() is used (rather than a .probe member)
	 * because ep93xx_spi_probe() lives in __init memory.
	 */
	return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe);
}
module_init(ep93xx_spi_init);

static void __exit ep93xx_spi_exit(void)
{
	platform_driver_unregister(&ep93xx_spi_driver);
}
module_exit(ep93xx_spi_exit);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");