spi-ep93xx.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208
  1. /*
  2. * Driver for Cirrus Logic EP93xx SPI controller.
  3. *
  4. * Copyright (C) 2010-2011 Mika Westerberg
  5. *
  6. * Explicit FIFO handling code was inspired by amba-pl022 driver.
  7. *
  8. * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
  9. *
  10. * For more information about the SPI controller see documentation on Cirrus
  11. * Logic web site:
  12. * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License version 2 as
  16. * published by the Free Software Foundation.
  17. */
  18. #include <linux/io.h>
  19. #include <linux/clk.h>
  20. #include <linux/err.h>
  21. #include <linux/delay.h>
  22. #include <linux/device.h>
  23. #include <linux/dmaengine.h>
  24. #include <linux/bitops.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/module.h>
  27. #include <linux/platform_device.h>
  28. #include <linux/workqueue.h>
  29. #include <linux/sched.h>
  30. #include <linux/scatterlist.h>
  31. #include <linux/spi/spi.h>
  32. #include <mach/dma.h>
  33. #include <mach/ep93xx_spi.h>
/*
 * SSP register offsets and bit definitions. See the EP93xx User's Guide
 * (linked in the header above) for the full register descriptions.
 */
#define SSPCR0			0x0000	/* control register 0 */
#define SSPCR0_MODE_SHIFT	6	/* SPI mode (CPOL/CPHA) field */
#define SSPCR0_SCR_SHIFT	8	/* serial clock rate (scr) field */

#define SSPCR1			0x0004	/* control register 1 */
#define SSPCR1_RIE		BIT(0)	/* RX FIFO interrupt enable */
#define SSPCR1_TIE		BIT(1)	/* TX FIFO interrupt enable */
#define SSPCR1_RORIE		BIT(2)	/* RX overrun interrupt enable */
#define SSPCR1_LBM		BIT(3)	/* loopback mode */
#define SSPCR1_SSE		BIT(4)	/* synchronous serial port enable */
#define SSPCR1_MS		BIT(5)	/* master/slave select */
#define SSPCR1_SOD		BIT(6)	/* slave output disable */

#define SSPDR			0x0008	/* data register (TX/RX FIFO access) */

#define SSPSR			0x000c	/* status register */
#define SSPSR_TFE		BIT(0)	/* TX FIFO empty */
#define SSPSR_TNF		BIT(1)	/* TX FIFO not full */
#define SSPSR_RNE		BIT(2)	/* RX FIFO not empty */
#define SSPSR_RFF		BIT(3)	/* RX FIFO full */
#define SSPSR_BSY		BIT(4)	/* controller busy */

#define SSPCPSR			0x0010	/* clock prescale register */

#define SSPIIR			0x0014	/* interrupt identification register */
#define SSPIIR_RIS		BIT(0)	/* RX interrupt status */
#define SSPIIR_TIS		BIT(1)	/* TX interrupt status */
#define SSPIIR_RORIS		BIT(2)	/* RX overrun interrupt status */
#define SSPICR			SSPIIR	/* a write here clears the overrun irq */

/* timeout in milliseconds */
#define SPI_TIMEOUT		5

/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8
/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @irq: IRQ number used by the driver
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, driver accepts transfer requests from protocol drivers.
 * @current_msg is used to hold pointer to the message that is currently
 * processed. If @current_msg is %NULL, it means that no processing is going
 * on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
	spinlock_t			lock;
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	int				irq;
	unsigned long			min_rate;
	unsigned long			max_rate;
	bool				running;
	struct workqueue_struct		*wq;
	struct work_struct		msg_work;
	struct completion		wait;
	struct list_head		msg_queue;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};
/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @dss: bits per word (4 - 16 bits)
 * @ops: private chip operations
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device	*spi;
	unsigned long		rate;
	u8			div_cpsr;	/* even value in [2..254] */
	u8			div_scr;	/* value in [0..255] */
	u8			dss;		/* CR0.DSS = bits_per_word - 1 */
	struct ep93xx_spi_chip_ops *ops;
};
  149. /* converts bits per word to CR0.DSS value */
  150. #define bits_per_word_to_dss(bpw) ((bpw) - 1)
  151. static inline void
  152. ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
  153. {
  154. __raw_writeb(value, espi->regs_base + reg);
  155. }
  156. static inline u8
  157. ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
  158. {
  159. return __raw_readb(spi->regs_base + reg);
  160. }
  161. static inline void
  162. ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
  163. {
  164. __raw_writew(value, espi->regs_base + reg);
  165. }
  166. static inline u16
  167. ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
  168. {
  169. return __raw_readw(spi->regs_base + reg);
  170. }
  171. static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
  172. {
  173. u8 regval;
  174. int err;
  175. err = clk_enable(espi->clk);
  176. if (err)
  177. return err;
  178. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  179. regval |= SSPCR1_SSE;
  180. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  181. return 0;
  182. }
  183. static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
  184. {
  185. u8 regval;
  186. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  187. regval &= ~SSPCR1_SSE;
  188. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  189. clk_disable(espi->clk);
  190. }
  191. static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
  192. {
  193. u8 regval;
  194. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  195. regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  196. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  197. }
  198. static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
  199. {
  200. u8 regval;
  201. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  202. regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  203. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  204. }
  205. /**
  206. * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
  207. * @espi: ep93xx SPI controller struct
  208. * @chip: divisors are calculated for this chip
  209. * @rate: desired SPI output clock rate
  210. *
  211. * Function calculates cpsr (clock pre-scaler) and scr divisors based on
  212. * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
  213. * for some reason, divisors cannot be calculated nothing is stored and
  214. * %-EINVAL is returned.
  215. */
  216. static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
  217. struct ep93xx_spi_chip *chip,
  218. unsigned long rate)
  219. {
  220. unsigned long spi_clk_rate = clk_get_rate(espi->clk);
  221. int cpsr, scr;
  222. /*
  223. * Make sure that max value is between values supported by the
  224. * controller. Note that minimum value is already checked in
  225. * ep93xx_spi_transfer().
  226. */
  227. rate = clamp(rate, espi->min_rate, espi->max_rate);
  228. /*
  229. * Calculate divisors so that we can get speed according the
  230. * following formula:
  231. * rate = spi_clock_rate / (cpsr * (1 + scr))
  232. *
  233. * cpsr must be even number and starts from 2, scr can be any number
  234. * between 0 and 255.
  235. */
  236. for (cpsr = 2; cpsr <= 254; cpsr += 2) {
  237. for (scr = 0; scr <= 255; scr++) {
  238. if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
  239. chip->div_scr = (u8)scr;
  240. chip->div_cpsr = (u8)cpsr;
  241. return 0;
  242. }
  243. }
  244. }
  245. return -EINVAL;
  246. }
  247. static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
  248. {
  249. struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
  250. int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
  251. if (chip->ops && chip->ops->cs_control)
  252. chip->ops->cs_control(spi, value);
  253. }
  254. /**
  255. * ep93xx_spi_setup() - setup an SPI device
  256. * @spi: SPI device to setup
  257. *
  258. * This function sets up SPI device mode, speed etc. Can be called multiple
  259. * times for a single device. Returns %0 in case of success, negative error in
  260. * case of failure. When this function returns success, the device is
  261. * deselected.
  262. */
  263. static int ep93xx_spi_setup(struct spi_device *spi)
  264. {
  265. struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
  266. struct ep93xx_spi_chip *chip;
  267. if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
  268. dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
  269. spi->bits_per_word);
  270. return -EINVAL;
  271. }
  272. chip = spi_get_ctldata(spi);
  273. if (!chip) {
  274. dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
  275. spi->modalias);
  276. chip = kzalloc(sizeof(*chip), GFP_KERNEL);
  277. if (!chip)
  278. return -ENOMEM;
  279. chip->spi = spi;
  280. chip->ops = spi->controller_data;
  281. if (chip->ops && chip->ops->setup) {
  282. int ret = chip->ops->setup(spi);
  283. if (ret) {
  284. kfree(chip);
  285. return ret;
  286. }
  287. }
  288. spi_set_ctldata(spi, chip);
  289. }
  290. if (spi->max_speed_hz != chip->rate) {
  291. int err;
  292. err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
  293. if (err != 0) {
  294. spi_set_ctldata(spi, NULL);
  295. kfree(chip);
  296. return err;
  297. }
  298. chip->rate = spi->max_speed_hz;
  299. }
  300. chip->dss = bits_per_word_to_dss(spi->bits_per_word);
  301. ep93xx_spi_cs_control(spi, false);
  302. return 0;
  303. }
  304. /**
  305. * ep93xx_spi_transfer() - queue message to be transferred
  306. * @spi: target SPI device
  307. * @msg: message to be transferred
  308. *
  309. * This function is called by SPI device drivers when they are going to transfer
  310. * a new message. It simply puts the message in the queue and schedules
  311. * workqueue to perform the actual transfer later on.
  312. *
  313. * Returns %0 on success and negative error in case of failure.
  314. */
  315. static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
  316. {
  317. struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
  318. struct spi_transfer *t;
  319. unsigned long flags;
  320. if (!msg || !msg->complete)
  321. return -EINVAL;
  322. /* first validate each transfer */
  323. list_for_each_entry(t, &msg->transfers, transfer_list) {
  324. if (t->bits_per_word) {
  325. if (t->bits_per_word < 4 || t->bits_per_word > 16)
  326. return -EINVAL;
  327. }
  328. if (t->speed_hz && t->speed_hz < espi->min_rate)
  329. return -EINVAL;
  330. }
  331. /*
  332. * Now that we own the message, let's initialize it so that it is
  333. * suitable for us. We use @msg->status to signal whether there was
  334. * error in transfer and @msg->state is used to hold pointer to the
  335. * current transfer (or %NULL if no active current transfer).
  336. */
  337. msg->state = NULL;
  338. msg->status = 0;
  339. msg->actual_length = 0;
  340. spin_lock_irqsave(&espi->lock, flags);
  341. if (!espi->running) {
  342. spin_unlock_irqrestore(&espi->lock, flags);
  343. return -ESHUTDOWN;
  344. }
  345. list_add_tail(&msg->queue, &espi->msg_queue);
  346. queue_work(espi->wq, &espi->msg_work);
  347. spin_unlock_irqrestore(&espi->lock, flags);
  348. return 0;
  349. }
  350. /**
  351. * ep93xx_spi_cleanup() - cleans up master controller specific state
  352. * @spi: SPI device to cleanup
  353. *
  354. * This function releases master controller specific state for given @spi
  355. * device.
  356. */
  357. static void ep93xx_spi_cleanup(struct spi_device *spi)
  358. {
  359. struct ep93xx_spi_chip *chip;
  360. chip = spi_get_ctldata(spi);
  361. if (chip) {
  362. if (chip->ops && chip->ops->cleanup)
  363. chip->ops->cleanup(spi);
  364. spi_set_ctldata(spi, NULL);
  365. kfree(chip);
  366. }
  367. }
  368. /**
  369. * ep93xx_spi_chip_setup() - configures hardware according to given @chip
  370. * @espi: ep93xx SPI controller struct
  371. * @chip: chip specific settings
  372. *
  373. * This function sets up the actual hardware registers with settings given in
  374. * @chip. Note that no validation is done so make sure that callers validate
  375. * settings before calling this.
  376. */
  377. static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
  378. const struct ep93xx_spi_chip *chip)
  379. {
  380. u16 cr0;
  381. cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
  382. cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
  383. cr0 |= chip->dss;
  384. dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
  385. chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
  386. dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
  387. ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
  388. ep93xx_spi_write_u16(espi, SSPCR0, cr0);
  389. }
  390. static inline int bits_per_word(const struct ep93xx_spi *espi)
  391. {
  392. struct spi_message *msg = espi->current_msg;
  393. struct spi_transfer *t = msg->state;
  394. return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
  395. }
  396. static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
  397. {
  398. if (bits_per_word(espi) > 8) {
  399. u16 tx_val = 0;
  400. if (t->tx_buf)
  401. tx_val = ((u16 *)t->tx_buf)[espi->tx];
  402. ep93xx_spi_write_u16(espi, SSPDR, tx_val);
  403. espi->tx += sizeof(tx_val);
  404. } else {
  405. u8 tx_val = 0;
  406. if (t->tx_buf)
  407. tx_val = ((u8 *)t->tx_buf)[espi->tx];
  408. ep93xx_spi_write_u8(espi, SSPDR, tx_val);
  409. espi->tx += sizeof(tx_val);
  410. }
  411. }
  412. static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
  413. {
  414. if (bits_per_word(espi) > 8) {
  415. u16 rx_val;
  416. rx_val = ep93xx_spi_read_u16(espi, SSPDR);
  417. if (t->rx_buf)
  418. ((u16 *)t->rx_buf)[espi->rx] = rx_val;
  419. espi->rx += sizeof(rx_val);
  420. } else {
  421. u8 rx_val;
  422. rx_val = ep93xx_spi_read_u8(espi, SSPDR);
  423. if (t->rx_buf)
  424. ((u8 *)t->rx_buf)[espi->rx] = rx_val;
  425. espi->rx += sizeof(rx_val);
  426. }
  427. }
  428. /**
  429. * ep93xx_spi_read_write() - perform next RX/TX transfer
  430. * @espi: ep93xx SPI controller struct
  431. *
  432. * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
  433. * called several times, the whole transfer will be completed. Returns
  434. * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
  435. *
  436. * When this function is finished, RX FIFO should be empty and TX FIFO should be
  437. * full.
  438. */
  439. static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
  440. {
  441. struct spi_message *msg = espi->current_msg;
  442. struct spi_transfer *t = msg->state;
  443. /* read as long as RX FIFO has frames in it */
  444. while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
  445. ep93xx_do_read(espi, t);
  446. espi->fifo_level--;
  447. }
  448. /* write as long as TX FIFO has room */
  449. while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
  450. ep93xx_do_write(espi, t);
  451. espi->fifo_level++;
  452. }
  453. if (espi->rx == t->len)
  454. return 0;
  455. return -EINPROGRESS;
  456. }
  457. static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
  458. {
  459. /*
  460. * Now everything is set up for the current transfer. We prime the TX
  461. * FIFO, enable interrupts, and wait for the transfer to complete.
  462. */
  463. if (ep93xx_spi_read_write(espi)) {
  464. ep93xx_spi_enable_interrupts(espi);
  465. wait_for_completion(&espi->wait);
  466. }
  467. }
  468. /**
  469. * ep93xx_spi_dma_prepare() - prepares a DMA transfer
  470. * @espi: ep93xx SPI controller struct
  471. * @dir: DMA transfer direction
  472. *
  473. * Function configures the DMA, maps the buffer and prepares the DMA
  474. * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
  475. * in case of failure.
  476. */
  477. static struct dma_async_tx_descriptor *
  478. ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
  479. {
  480. struct spi_transfer *t = espi->current_msg->state;
  481. struct dma_async_tx_descriptor *txd;
  482. enum dma_slave_buswidth buswidth;
  483. struct dma_slave_config conf;
  484. struct scatterlist *sg;
  485. struct sg_table *sgt;
  486. struct dma_chan *chan;
  487. const void *buf, *pbuf;
  488. size_t len = t->len;
  489. int i, ret, nents;
  490. if (bits_per_word(espi) > 8)
  491. buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
  492. else
  493. buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
  494. memset(&conf, 0, sizeof(conf));
  495. conf.direction = dir;
  496. if (dir == DMA_FROM_DEVICE) {
  497. chan = espi->dma_rx;
  498. buf = t->rx_buf;
  499. sgt = &espi->rx_sgt;
  500. conf.src_addr = espi->sspdr_phys;
  501. conf.src_addr_width = buswidth;
  502. } else {
  503. chan = espi->dma_tx;
  504. buf = t->tx_buf;
  505. sgt = &espi->tx_sgt;
  506. conf.dst_addr = espi->sspdr_phys;
  507. conf.dst_addr_width = buswidth;
  508. }
  509. ret = dmaengine_slave_config(chan, &conf);
  510. if (ret)
  511. return ERR_PTR(ret);
  512. /*
  513. * We need to split the transfer into PAGE_SIZE'd chunks. This is
  514. * because we are using @espi->zeropage to provide a zero RX buffer
  515. * for the TX transfers and we have only allocated one page for that.
  516. *
  517. * For performance reasons we allocate a new sg_table only when
  518. * needed. Otherwise we will re-use the current one. Eventually the
  519. * last sg_table is released in ep93xx_spi_release_dma().
  520. */
  521. nents = DIV_ROUND_UP(len, PAGE_SIZE);
  522. if (nents != sgt->nents) {
  523. sg_free_table(sgt);
  524. ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
  525. if (ret)
  526. return ERR_PTR(ret);
  527. }
  528. pbuf = buf;
  529. for_each_sg(sgt->sgl, sg, sgt->nents, i) {
  530. size_t bytes = min_t(size_t, len, PAGE_SIZE);
  531. if (buf) {
  532. sg_set_page(sg, virt_to_page(pbuf), bytes,
  533. offset_in_page(pbuf));
  534. } else {
  535. sg_set_page(sg, virt_to_page(espi->zeropage),
  536. bytes, 0);
  537. }
  538. pbuf += bytes;
  539. len -= bytes;
  540. }
  541. if (WARN_ON(len)) {
  542. dev_warn(&espi->pdev->dev, "len = %d expected 0!", len);
  543. return ERR_PTR(-EINVAL);
  544. }
  545. nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  546. if (!nents)
  547. return ERR_PTR(-ENOMEM);
  548. txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
  549. dir, DMA_CTRL_ACK);
  550. if (!txd) {
  551. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  552. return ERR_PTR(-ENOMEM);
  553. }
  554. return txd;
  555. }
  556. /**
  557. * ep93xx_spi_dma_finish() - finishes with a DMA transfer
  558. * @espi: ep93xx SPI controller struct
  559. * @dir: DMA transfer direction
  560. *
  561. * Function finishes with the DMA transfer. After this, the DMA buffer is
  562. * unmapped.
  563. */
  564. static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
  565. enum dma_data_direction dir)
  566. {
  567. struct dma_chan *chan;
  568. struct sg_table *sgt;
  569. if (dir == DMA_FROM_DEVICE) {
  570. chan = espi->dma_rx;
  571. sgt = &espi->rx_sgt;
  572. } else {
  573. chan = espi->dma_tx;
  574. sgt = &espi->tx_sgt;
  575. }
  576. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  577. }
  578. static void ep93xx_spi_dma_callback(void *callback_param)
  579. {
  580. complete(callback_param);
  581. }
  582. static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
  583. {
  584. struct spi_message *msg = espi->current_msg;
  585. struct dma_async_tx_descriptor *rxd, *txd;
  586. rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
  587. if (IS_ERR(rxd)) {
  588. dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
  589. msg->status = PTR_ERR(rxd);
  590. return;
  591. }
  592. txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
  593. if (IS_ERR(txd)) {
  594. ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
  595. dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
  596. msg->status = PTR_ERR(txd);
  597. return;
  598. }
  599. /* We are ready when RX is done */
  600. rxd->callback = ep93xx_spi_dma_callback;
  601. rxd->callback_param = &espi->wait;
  602. /* Now submit both descriptors and wait while they finish */
  603. dmaengine_submit(rxd);
  604. dmaengine_submit(txd);
  605. dma_async_issue_pending(espi->dma_rx);
  606. dma_async_issue_pending(espi->dma_tx);
  607. wait_for_completion(&espi->wait);
  608. ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
  609. ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
  610. }
/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	/* publish the active transfer; read back via bits_per_word() etc. */
	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore original later when
	 * the transfer is finished.
	 */
	if (t->speed_hz || t->bits_per_word) {
		/* stack copy so the per-device settings stay untouched */
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	/* reset per-transfer byte counters used by the PIO/IRQ paths */
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point of setting up DMA for the transfers which will
	 * fit into the FIFO and can be transferred with a single interrupt.
	 * So in these cases we will be using PIO and don't bother for DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler to handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}

	/* restore the device's own settings if we used temporary ones */
	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}
  693. /*
  694. * ep93xx_spi_process_message() - process one SPI message
  695. * @espi: ep93xx SPI controller struct
  696. * @msg: message to process
  697. *
  698. * This function processes a single SPI message. We go through all transfers in
  699. * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
  700. * asserted during the whole message (unless per transfer cs_change is set).
  701. *
  702. * @msg->status contains %0 in case of success or negative error code in case of
  703. * failure.
  704. */
  705. static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
  706. struct spi_message *msg)
  707. {
  708. unsigned long timeout;
  709. struct spi_transfer *t;
  710. int err;
  711. /*
  712. * Enable the SPI controller and its clock.
  713. */
  714. err = ep93xx_spi_enable(espi);
  715. if (err) {
  716. dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
  717. msg->status = err;
  718. return;
  719. }
  720. /*
  721. * Just to be sure: flush any data from RX FIFO.
  722. */
  723. timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
  724. while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
  725. if (time_after(jiffies, timeout)) {
  726. dev_warn(&espi->pdev->dev,
  727. "timeout while flushing RX FIFO\n");
  728. msg->status = -ETIMEDOUT;
  729. return;
  730. }
  731. ep93xx_spi_read_u16(espi, SSPDR);
  732. }
  733. /*
  734. * We explicitly handle FIFO level. This way we don't have to check TX
  735. * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
  736. */
  737. espi->fifo_level = 0;
  738. /*
  739. * Update SPI controller registers according to spi device and assert
  740. * the chipselect.
  741. */
  742. ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
  743. ep93xx_spi_cs_control(msg->spi, true);
  744. list_for_each_entry(t, &msg->transfers, transfer_list) {
  745. ep93xx_spi_process_transfer(espi, msg, t);
  746. if (msg->status)
  747. break;
  748. }
  749. /*
  750. * Now the whole message is transferred (or failed for some reason). We
  751. * deselect the device and disable the SPI controller.
  752. */
  753. ep93xx_spi_cs_control(msg->spi, false);
  754. ep93xx_spi_disable(espi);
  755. }
/* map a work_struct back to its owning controller */
#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. Message is taken out from the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After message is transferred, protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to negative error
 * number, otherwise it contains zero (and @msg->actual_length is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	/*
	 * Pop the next message under the lock; bail out if the queue is
	 * stopped, empty, or a message is already in flight (only one
	 * message is processed at a time).
	 */
	spin_lock_irq(&espi->lock);
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	/* actual transfer happens without the lock held (may sleep) */
	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}
  796. static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
  797. {
  798. struct ep93xx_spi *espi = dev_id;
  799. u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);
  800. /*
  801. * If we got ROR (receive overrun) interrupt we know that something is
  802. * wrong. Just abort the message.
  803. */
  804. if (unlikely(irq_status & SSPIIR_RORIS)) {
  805. /* clear the overrun interrupt */
  806. ep93xx_spi_write_u8(espi, SSPICR, 0);
  807. dev_warn(&espi->pdev->dev,
  808. "receive overrun, aborting the message\n");
  809. espi->current_msg->status = -EIO;
  810. } else {
  811. /*
  812. * Interrupt is either RX (RIS) or TX (TIS). For both cases we
  813. * simply execute next data transfer.
  814. */
  815. if (ep93xx_spi_read_write(espi)) {
  816. /*
  817. * In normal case, there still is some processing left
  818. * for current transfer. Let's wait for the next
  819. * interrupt then.
  820. */
  821. return IRQ_HANDLED;
  822. }
  823. }
  824. /*
  825. * Current transfer is finished, either with error or with success. In
  826. * any case we disable interrupts and notify the worker to handle
  827. * any post-processing of the message.
  828. */
  829. ep93xx_spi_disable_interrupts(espi);
  830. complete(&espi->wait);
  831. return IRQ_HANDLED;
  832. }
  833. static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
  834. {
  835. if (ep93xx_dma_chan_is_m2p(chan))
  836. return false;
  837. chan->private = filter_param;
  838. return true;
  839. }
  840. static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
  841. {
  842. dma_cap_mask_t mask;
  843. int ret;
  844. espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
  845. if (!espi->zeropage)
  846. return -ENOMEM;
  847. dma_cap_zero(mask);
  848. dma_cap_set(DMA_SLAVE, mask);
  849. espi->dma_rx_data.port = EP93XX_DMA_SSP;
  850. espi->dma_rx_data.direction = DMA_FROM_DEVICE;
  851. espi->dma_rx_data.name = "ep93xx-spi-rx";
  852. espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  853. &espi->dma_rx_data);
  854. if (!espi->dma_rx) {
  855. ret = -ENODEV;
  856. goto fail_free_page;
  857. }
  858. espi->dma_tx_data.port = EP93XX_DMA_SSP;
  859. espi->dma_tx_data.direction = DMA_TO_DEVICE;
  860. espi->dma_tx_data.name = "ep93xx-spi-tx";
  861. espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  862. &espi->dma_tx_data);
  863. if (!espi->dma_tx) {
  864. ret = -ENODEV;
  865. goto fail_release_rx;
  866. }
  867. return 0;
  868. fail_release_rx:
  869. dma_release_channel(espi->dma_rx);
  870. espi->dma_rx = NULL;
  871. fail_free_page:
  872. free_page((unsigned long)espi->zeropage);
  873. return ret;
  874. }
  875. static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
  876. {
  877. if (espi->dma_rx) {
  878. dma_release_channel(espi->dma_rx);
  879. sg_free_table(&espi->rx_sgt);
  880. }
  881. if (espi->dma_tx) {
  882. dma_release_channel(espi->dma_tx);
  883. sg_free_table(&espi->tx_sgt);
  884. }
  885. if (espi->zeropage)
  886. free_page((unsigned long)espi->zeropage);
  887. }
  888. static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
  889. {
  890. struct spi_master *master;
  891. struct ep93xx_spi_info *info;
  892. struct ep93xx_spi *espi;
  893. struct resource *res;
  894. int error;
  895. info = pdev->dev.platform_data;
  896. master = spi_alloc_master(&pdev->dev, sizeof(*espi));
  897. if (!master) {
  898. dev_err(&pdev->dev, "failed to allocate spi master\n");
  899. return -ENOMEM;
  900. }
  901. master->setup = ep93xx_spi_setup;
  902. master->transfer = ep93xx_spi_transfer;
  903. master->cleanup = ep93xx_spi_cleanup;
  904. master->bus_num = pdev->id;
  905. master->num_chipselect = info->num_chipselect;
  906. master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
  907. platform_set_drvdata(pdev, master);
  908. espi = spi_master_get_devdata(master);
  909. espi->clk = clk_get(&pdev->dev, NULL);
  910. if (IS_ERR(espi->clk)) {
  911. dev_err(&pdev->dev, "unable to get spi clock\n");
  912. error = PTR_ERR(espi->clk);
  913. goto fail_release_master;
  914. }
  915. spin_lock_init(&espi->lock);
  916. init_completion(&espi->wait);
  917. /*
  918. * Calculate maximum and minimum supported clock rates
  919. * for the controller.
  920. */
  921. espi->max_rate = clk_get_rate(espi->clk) / 2;
  922. espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
  923. espi->pdev = pdev;
  924. espi->irq = platform_get_irq(pdev, 0);
  925. if (espi->irq < 0) {
  926. error = -EBUSY;
  927. dev_err(&pdev->dev, "failed to get irq resources\n");
  928. goto fail_put_clock;
  929. }
  930. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  931. if (!res) {
  932. dev_err(&pdev->dev, "unable to get iomem resource\n");
  933. error = -ENODEV;
  934. goto fail_put_clock;
  935. }
  936. res = request_mem_region(res->start, resource_size(res), pdev->name);
  937. if (!res) {
  938. dev_err(&pdev->dev, "unable to request iomem resources\n");
  939. error = -EBUSY;
  940. goto fail_put_clock;
  941. }
  942. espi->sspdr_phys = res->start + SSPDR;
  943. espi->regs_base = ioremap(res->start, resource_size(res));
  944. if (!espi->regs_base) {
  945. dev_err(&pdev->dev, "failed to map resources\n");
  946. error = -ENODEV;
  947. goto fail_free_mem;
  948. }
  949. error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
  950. "ep93xx-spi", espi);
  951. if (error) {
  952. dev_err(&pdev->dev, "failed to request irq\n");
  953. goto fail_unmap_regs;
  954. }
  955. if (info->use_dma && ep93xx_spi_setup_dma(espi))
  956. dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
  957. espi->wq = create_singlethread_workqueue("ep93xx_spid");
  958. if (!espi->wq) {
  959. dev_err(&pdev->dev, "unable to create workqueue\n");
  960. goto fail_free_dma;
  961. }
  962. INIT_WORK(&espi->msg_work, ep93xx_spi_work);
  963. INIT_LIST_HEAD(&espi->msg_queue);
  964. espi->running = true;
  965. /* make sure that the hardware is disabled */
  966. ep93xx_spi_write_u8(espi, SSPCR1, 0);
  967. error = spi_register_master(master);
  968. if (error) {
  969. dev_err(&pdev->dev, "failed to register SPI master\n");
  970. goto fail_free_queue;
  971. }
  972. dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
  973. (unsigned long)res->start, espi->irq);
  974. return 0;
  975. fail_free_queue:
  976. destroy_workqueue(espi->wq);
  977. fail_free_dma:
  978. ep93xx_spi_release_dma(espi);
  979. free_irq(espi->irq, espi);
  980. fail_unmap_regs:
  981. iounmap(espi->regs_base);
  982. fail_free_mem:
  983. release_mem_region(res->start, resource_size(res));
  984. fail_put_clock:
  985. clk_put(espi->clk);
  986. fail_release_master:
  987. spi_master_put(master);
  988. platform_set_drvdata(pdev, NULL);
  989. return error;
  990. }
/*
 * ep93xx_spi_remove() - platform remove; unwinds everything probe set up
 * @pdev: platform device
 *
 * Stops message acceptance, drains the workqueue, fails any still-queued
 * messages with -ESHUTDOWN, then releases DMA, IRQ, registers, memory
 * region and clock before unregistering the master.
 */
static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct resource *res;

	/* Stop accepting new messages before tearing the queue down. */
	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	/* Flushes/terminates any in-flight ep93xx_spi_work() invocation. */
	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		/* The lock is dropped so msg->complete() runs unlocked. */
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);
	free_irq(espi->irq, espi);
	iounmap(espi->regs_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	clk_put(espi->clk);
	platform_set_drvdata(pdev, NULL);

	spi_unregister_master(master);
	return 0;
}
/* Platform driver glue: binds to devices named "ep93xx-spi". */
static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= __devexit_p(ep93xx_spi_remove),
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");