spi-ep93xx.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179
  1. /*
  2. * Driver for Cirrus Logic EP93xx SPI controller.
  3. *
  4. * Copyright (C) 2010-2011 Mika Westerberg
  5. *
  6. * Explicit FIFO handling code was inspired by amba-pl022 driver.
  7. *
  8. * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
  9. *
  10. * For more information about the SPI controller see documentation on Cirrus
  11. * Logic web site:
  12. * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License version 2 as
  16. * published by the Free Software Foundation.
  17. */
  18. #include <linux/io.h>
  19. #include <linux/clk.h>
  20. #include <linux/err.h>
  21. #include <linux/delay.h>
  22. #include <linux/device.h>
  23. #include <linux/dmaengine.h>
  24. #include <linux/bitops.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/module.h>
  27. #include <linux/platform_device.h>
  28. #include <linux/workqueue.h>
  29. #include <linux/sched.h>
  30. #include <linux/scatterlist.h>
  31. #include <linux/spi/spi.h>
  32. #include <linux/platform_data/dma-ep93xx.h>
  33. #include <linux/platform_data/spi-ep93xx.h>
/*
 * SSP controller register offsets and bit definitions. Register meanings
 * follow the EP93xx User's Guide referenced in the header comment.
 */
#define SSPCR0			0x0000	/* control register 0 */
#define SSPCR0_MODE_SHIFT	6	/* SPI mode (CPOL/CPHA) field position */
#define SSPCR0_SCR_SHIFT	8	/* serial clock rate (scr) field position */

#define SSPCR1			0x0004	/* control register 1 */
#define SSPCR1_RIE		BIT(0)	/* RX FIFO interrupt enable */
#define SSPCR1_TIE		BIT(1)	/* TX FIFO interrupt enable */
#define SSPCR1_RORIE		BIT(2)	/* RX overrun interrupt enable */
#define SSPCR1_LBM		BIT(3)	/* loopback mode */
#define SSPCR1_SSE		BIT(4)	/* synchronous serial engine enable */
#define SSPCR1_MS		BIT(5)	/* master/slave select */
#define SSPCR1_SOD		BIT(6)	/* slave output disable */

#define SSPDR			0x0008	/* data register (gateway to TX/RX FIFOs) */

#define SSPSR			0x000c	/* status register */
#define SSPSR_TFE		BIT(0)	/* TX FIFO empty */
#define SSPSR_TNF		BIT(1)	/* TX FIFO not full */
#define SSPSR_RNE		BIT(2)	/* RX FIFO not empty */
#define SSPSR_RFF		BIT(3)	/* RX FIFO full */
#define SSPSR_BSY		BIT(4)	/* controller busy */

#define SSPCPSR			0x0010	/* clock prescale (cpsr) register */

#define SSPIIR			0x0014	/* interrupt identification register */
#define SSPIIR_RIS		BIT(0)	/* RX FIFO interrupt status */
#define SSPIIR_TIS		BIT(1)	/* TX FIFO interrupt status */
#define SSPIIR_RORIS		BIT(2)	/* RX overrun interrupt status */
#define SSPICR			SSPIIR	/* interrupt clear shares the IIR offset */

/* timeout in milliseconds */
#define SPI_TIMEOUT		5

/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8
/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, driver accepts transfer requests from protocol drivers.
 * @current_msg is used to hold pointer to the message that is currently
 * processed. If @current_msg is %NULL, it means that no processing is going
 * on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
	spinlock_t			lock;
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	unsigned long			min_rate;
	unsigned long			max_rate;
	bool				running;
	struct workqueue_struct		*wq;
	struct work_struct		msg_work;
	struct completion		wait;
	struct list_head		msg_queue;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};
/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @dss: bits per word (4 - 16 bits)
 * @ops: private chip operations
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	unsigned long			rate;
	u8				div_cpsr;
	u8				div_scr;
	u8				dss;
	struct ep93xx_spi_chip_ops	*ops;
};

/* converts bits per word to CR0.DSS value (hardware encodes n bits as n-1) */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)
/*
 * Raw 8/16-bit accessors for the memory mapped SSP registers. __raw_*
 * variants are used; the registers sit behind @regs_base set up at probe
 * time (not visible in this chunk).
 */
static inline void
ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
{
	__raw_writeb(value, espi->regs_base + reg);
}

static inline u8
ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readb(spi->regs_base + reg);
}

static inline void
ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
{
	__raw_writew(value, espi->regs_base + reg);
}

static inline u16
ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readw(spi->regs_base + reg);
}
  169. static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
  170. {
  171. u8 regval;
  172. int err;
  173. err = clk_enable(espi->clk);
  174. if (err)
  175. return err;
  176. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  177. regval |= SSPCR1_SSE;
  178. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  179. return 0;
  180. }
  181. static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
  182. {
  183. u8 regval;
  184. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  185. regval &= ~SSPCR1_SSE;
  186. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  187. clk_disable(espi->clk);
  188. }
  189. static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
  190. {
  191. u8 regval;
  192. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  193. regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  194. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  195. }
  196. static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
  197. {
  198. u8 regval;
  199. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  200. regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  201. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  202. }
  203. /**
  204. * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
  205. * @espi: ep93xx SPI controller struct
  206. * @chip: divisors are calculated for this chip
  207. * @rate: desired SPI output clock rate
  208. *
  209. * Function calculates cpsr (clock pre-scaler) and scr divisors based on
  210. * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
  211. * for some reason, divisors cannot be calculated nothing is stored and
  212. * %-EINVAL is returned.
  213. */
  214. static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
  215. struct ep93xx_spi_chip *chip,
  216. unsigned long rate)
  217. {
  218. unsigned long spi_clk_rate = clk_get_rate(espi->clk);
  219. int cpsr, scr;
  220. /*
  221. * Make sure that max value is between values supported by the
  222. * controller. Note that minimum value is already checked in
  223. * ep93xx_spi_transfer().
  224. */
  225. rate = clamp(rate, espi->min_rate, espi->max_rate);
  226. /*
  227. * Calculate divisors so that we can get speed according the
  228. * following formula:
  229. * rate = spi_clock_rate / (cpsr * (1 + scr))
  230. *
  231. * cpsr must be even number and starts from 2, scr can be any number
  232. * between 0 and 255.
  233. */
  234. for (cpsr = 2; cpsr <= 254; cpsr += 2) {
  235. for (scr = 0; scr <= 255; scr++) {
  236. if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
  237. chip->div_scr = (u8)scr;
  238. chip->div_cpsr = (u8)cpsr;
  239. return 0;
  240. }
  241. }
  242. }
  243. return -EINVAL;
  244. }
  245. static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
  246. {
  247. struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
  248. int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
  249. if (chip->ops && chip->ops->cs_control)
  250. chip->ops->cs_control(spi, value);
  251. }
  252. /**
  253. * ep93xx_spi_setup() - setup an SPI device
  254. * @spi: SPI device to setup
  255. *
  256. * This function sets up SPI device mode, speed etc. Can be called multiple
  257. * times for a single device. Returns %0 in case of success, negative error in
  258. * case of failure. When this function returns success, the device is
  259. * deselected.
  260. */
  261. static int ep93xx_spi_setup(struct spi_device *spi)
  262. {
  263. struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
  264. struct ep93xx_spi_chip *chip;
  265. chip = spi_get_ctldata(spi);
  266. if (!chip) {
  267. dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
  268. spi->modalias);
  269. chip = kzalloc(sizeof(*chip), GFP_KERNEL);
  270. if (!chip)
  271. return -ENOMEM;
  272. chip->spi = spi;
  273. chip->ops = spi->controller_data;
  274. if (chip->ops && chip->ops->setup) {
  275. int ret = chip->ops->setup(spi);
  276. if (ret) {
  277. kfree(chip);
  278. return ret;
  279. }
  280. }
  281. spi_set_ctldata(spi, chip);
  282. }
  283. if (spi->max_speed_hz != chip->rate) {
  284. int err;
  285. err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
  286. if (err != 0) {
  287. spi_set_ctldata(spi, NULL);
  288. kfree(chip);
  289. return err;
  290. }
  291. chip->rate = spi->max_speed_hz;
  292. }
  293. chip->dss = bits_per_word_to_dss(spi->bits_per_word);
  294. ep93xx_spi_cs_control(spi, false);
  295. return 0;
  296. }
  297. /**
  298. * ep93xx_spi_transfer() - queue message to be transferred
  299. * @spi: target SPI device
  300. * @msg: message to be transferred
  301. *
  302. * This function is called by SPI device drivers when they are going to transfer
  303. * a new message. It simply puts the message in the queue and schedules
  304. * workqueue to perform the actual transfer later on.
  305. *
  306. * Returns %0 on success and negative error in case of failure.
  307. */
  308. static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
  309. {
  310. struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
  311. struct spi_transfer *t;
  312. unsigned long flags;
  313. if (!msg || !msg->complete)
  314. return -EINVAL;
  315. /* first validate each transfer */
  316. list_for_each_entry(t, &msg->transfers, transfer_list) {
  317. if (t->speed_hz && t->speed_hz < espi->min_rate)
  318. return -EINVAL;
  319. }
  320. /*
  321. * Now that we own the message, let's initialize it so that it is
  322. * suitable for us. We use @msg->status to signal whether there was
  323. * error in transfer and @msg->state is used to hold pointer to the
  324. * current transfer (or %NULL if no active current transfer).
  325. */
  326. msg->state = NULL;
  327. msg->status = 0;
  328. msg->actual_length = 0;
  329. spin_lock_irqsave(&espi->lock, flags);
  330. if (!espi->running) {
  331. spin_unlock_irqrestore(&espi->lock, flags);
  332. return -ESHUTDOWN;
  333. }
  334. list_add_tail(&msg->queue, &espi->msg_queue);
  335. queue_work(espi->wq, &espi->msg_work);
  336. spin_unlock_irqrestore(&espi->lock, flags);
  337. return 0;
  338. }
  339. /**
  340. * ep93xx_spi_cleanup() - cleans up master controller specific state
  341. * @spi: SPI device to cleanup
  342. *
  343. * This function releases master controller specific state for given @spi
  344. * device.
  345. */
  346. static void ep93xx_spi_cleanup(struct spi_device *spi)
  347. {
  348. struct ep93xx_spi_chip *chip;
  349. chip = spi_get_ctldata(spi);
  350. if (chip) {
  351. if (chip->ops && chip->ops->cleanup)
  352. chip->ops->cleanup(spi);
  353. spi_set_ctldata(spi, NULL);
  354. kfree(chip);
  355. }
  356. }
  357. /**
  358. * ep93xx_spi_chip_setup() - configures hardware according to given @chip
  359. * @espi: ep93xx SPI controller struct
  360. * @chip: chip specific settings
  361. *
  362. * This function sets up the actual hardware registers with settings given in
  363. * @chip. Note that no validation is done so make sure that callers validate
  364. * settings before calling this.
  365. */
  366. static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
  367. const struct ep93xx_spi_chip *chip)
  368. {
  369. u16 cr0;
  370. cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
  371. cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
  372. cr0 |= chip->dss;
  373. dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
  374. chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
  375. dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
  376. ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
  377. ep93xx_spi_write_u16(espi, SSPCR0, cr0);
  378. }
  379. static inline int bits_per_word(const struct ep93xx_spi *espi)
  380. {
  381. struct spi_message *msg = espi->current_msg;
  382. struct spi_transfer *t = msg->state;
  383. return t->bits_per_word;
  384. }
  385. static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
  386. {
  387. if (bits_per_word(espi) > 8) {
  388. u16 tx_val = 0;
  389. if (t->tx_buf)
  390. tx_val = ((u16 *)t->tx_buf)[espi->tx];
  391. ep93xx_spi_write_u16(espi, SSPDR, tx_val);
  392. espi->tx += sizeof(tx_val);
  393. } else {
  394. u8 tx_val = 0;
  395. if (t->tx_buf)
  396. tx_val = ((u8 *)t->tx_buf)[espi->tx];
  397. ep93xx_spi_write_u8(espi, SSPDR, tx_val);
  398. espi->tx += sizeof(tx_val);
  399. }
  400. }
  401. static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
  402. {
  403. if (bits_per_word(espi) > 8) {
  404. u16 rx_val;
  405. rx_val = ep93xx_spi_read_u16(espi, SSPDR);
  406. if (t->rx_buf)
  407. ((u16 *)t->rx_buf)[espi->rx] = rx_val;
  408. espi->rx += sizeof(rx_val);
  409. } else {
  410. u8 rx_val;
  411. rx_val = ep93xx_spi_read_u8(espi, SSPDR);
  412. if (t->rx_buf)
  413. ((u8 *)t->rx_buf)[espi->rx] = rx_val;
  414. espi->rx += sizeof(rx_val);
  415. }
  416. }
/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should be
 * full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/*
	 * Write as long as TX FIFO has room. @fifo_level is maintained by the
	 * driver (drain before fill, above) so that the TX FIFO is never
	 * overfilled in a way that could overrun the RX FIFO.
	 */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	/* the transfer is complete once every byte has been *received* back */
	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}
  446. static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
  447. {
  448. /*
  449. * Now everything is set up for the current transfer. We prime the TX
  450. * FIFO, enable interrupts, and wait for the transfer to complete.
  451. */
  452. if (ep93xx_spi_read_write(espi)) {
  453. ep93xx_spi_enable_interrupts(espi);
  454. wait_for_completion(&espi->wait);
  455. }
  456. }
  457. /**
  458. * ep93xx_spi_dma_prepare() - prepares a DMA transfer
  459. * @espi: ep93xx SPI controller struct
  460. * @dir: DMA transfer direction
  461. *
  462. * Function configures the DMA, maps the buffer and prepares the DMA
  463. * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
  464. * in case of failure.
  465. */
  466. static struct dma_async_tx_descriptor *
  467. ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
  468. {
  469. struct spi_transfer *t = espi->current_msg->state;
  470. struct dma_async_tx_descriptor *txd;
  471. enum dma_slave_buswidth buswidth;
  472. struct dma_slave_config conf;
  473. struct scatterlist *sg;
  474. struct sg_table *sgt;
  475. struct dma_chan *chan;
  476. const void *buf, *pbuf;
  477. size_t len = t->len;
  478. int i, ret, nents;
  479. if (bits_per_word(espi) > 8)
  480. buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
  481. else
  482. buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
  483. memset(&conf, 0, sizeof(conf));
  484. conf.direction = dir;
  485. if (dir == DMA_DEV_TO_MEM) {
  486. chan = espi->dma_rx;
  487. buf = t->rx_buf;
  488. sgt = &espi->rx_sgt;
  489. conf.src_addr = espi->sspdr_phys;
  490. conf.src_addr_width = buswidth;
  491. } else {
  492. chan = espi->dma_tx;
  493. buf = t->tx_buf;
  494. sgt = &espi->tx_sgt;
  495. conf.dst_addr = espi->sspdr_phys;
  496. conf.dst_addr_width = buswidth;
  497. }
  498. ret = dmaengine_slave_config(chan, &conf);
  499. if (ret)
  500. return ERR_PTR(ret);
  501. /*
  502. * We need to split the transfer into PAGE_SIZE'd chunks. This is
  503. * because we are using @espi->zeropage to provide a zero RX buffer
  504. * for the TX transfers and we have only allocated one page for that.
  505. *
  506. * For performance reasons we allocate a new sg_table only when
  507. * needed. Otherwise we will re-use the current one. Eventually the
  508. * last sg_table is released in ep93xx_spi_release_dma().
  509. */
  510. nents = DIV_ROUND_UP(len, PAGE_SIZE);
  511. if (nents != sgt->nents) {
  512. sg_free_table(sgt);
  513. ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
  514. if (ret)
  515. return ERR_PTR(ret);
  516. }
  517. pbuf = buf;
  518. for_each_sg(sgt->sgl, sg, sgt->nents, i) {
  519. size_t bytes = min_t(size_t, len, PAGE_SIZE);
  520. if (buf) {
  521. sg_set_page(sg, virt_to_page(pbuf), bytes,
  522. offset_in_page(pbuf));
  523. } else {
  524. sg_set_page(sg, virt_to_page(espi->zeropage),
  525. bytes, 0);
  526. }
  527. pbuf += bytes;
  528. len -= bytes;
  529. }
  530. if (WARN_ON(len)) {
  531. dev_warn(&espi->pdev->dev, "len = %d expected 0!", len);
  532. return ERR_PTR(-EINVAL);
  533. }
  534. nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  535. if (!nents)
  536. return ERR_PTR(-ENOMEM);
  537. txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
  538. if (!txd) {
  539. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  540. return ERR_PTR(-ENOMEM);
  541. }
  542. return txd;
  543. }
  544. /**
  545. * ep93xx_spi_dma_finish() - finishes with a DMA transfer
  546. * @espi: ep93xx SPI controller struct
  547. * @dir: DMA transfer direction
  548. *
  549. * Function finishes with the DMA transfer. After this, the DMA buffer is
  550. * unmapped.
  551. */
  552. static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
  553. enum dma_transfer_direction dir)
  554. {
  555. struct dma_chan *chan;
  556. struct sg_table *sgt;
  557. if (dir == DMA_DEV_TO_MEM) {
  558. chan = espi->dma_rx;
  559. sgt = &espi->rx_sgt;
  560. } else {
  561. chan = espi->dma_tx;
  562. sgt = &espi->tx_sgt;
  563. }
  564. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  565. }
  566. static void ep93xx_spi_dma_callback(void *callback_param)
  567. {
  568. complete(callback_param);
  569. }
  570. static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
  571. {
  572. struct spi_message *msg = espi->current_msg;
  573. struct dma_async_tx_descriptor *rxd, *txd;
  574. rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
  575. if (IS_ERR(rxd)) {
  576. dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
  577. msg->status = PTR_ERR(rxd);
  578. return;
  579. }
  580. txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
  581. if (IS_ERR(txd)) {
  582. ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
  583. dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
  584. msg->status = PTR_ERR(txd);
  585. return;
  586. }
  587. /* We are ready when RX is done */
  588. rxd->callback = ep93xx_spi_dma_callback;
  589. rxd->callback_param = &espi->wait;
  590. /* Now submit both descriptors and wait while they finish */
  591. dmaengine_submit(rxd);
  592. dmaengine_submit(txd);
  593. dma_async_issue_pending(espi->dma_rx);
  594. dma_async_issue_pending(espi->dma_tx);
  595. wait_for_completion(&espi->wait);
  596. ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
  597. ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
  598. }
/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	/* publish the active transfer; PIO/DMA paths read it via msg->state */
	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore original later when
	 * the transfer is finished.
	 */
	if (t->speed_hz || t->bits_per_word) {
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	/* reset per-transfer byte counters used by the PIO path */
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point of setting up DMA for the transfers which will
	 * fit into the FIFO and can be transferred with a single interrupt.
	 * So in these cases we will be using PIO and don't bother for DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler to handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}

	/* restore the device's own settings if this transfer overrode them */
	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}
/*
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case of
 * failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO. Bounded by
	 * SPI_TIMEOUT ms in case the RNE bit never clears.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			return;
		}
		/* discard one stale frame */
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Update SPI controller registers according to spi device and assert
	 * the chipselect.
	 */
	ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}
/* map the embedded work_struct back to its containing ep93xx_spi */
#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. Message is taken out from the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After message is transferred, protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to negative error
 * number, otherwise it contains zero (and @msg->actual_length is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	/*
	 * Only one message is processed at a time: bail out if the queue is
	 * stopped, empty, or a message is already in flight.
	 */
	spin_lock_irq(&espi->lock);
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	/* may sleep; the lock is not held while transferring */
	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}
/**
 * ep93xx_spi_interrupt() - SSP interrupt handler
 * @irq: interrupt number
 * @dev_id: pointer to the driver's struct ep93xx_spi
 *
 * Services RX/TX FIFO interrupts by pumping the current transfer, and aborts
 * the in-flight message on receive overrun. When the transfer is complete
 * (or failed), interrupts are masked and the waiting worker is woken.
 *
 * NOTE(review): dereferences espi->current_msg without a NULL check — this
 * assumes SSP interrupts are only enabled while a message is in flight;
 * confirm against the transfer start path.
 *
 * Returns: always IRQ_HANDLED.
 */
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}
  821. static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
  822. {
  823. if (ep93xx_dma_chan_is_m2p(chan))
  824. return false;
  825. chan->private = filter_param;
  826. return true;
  827. }
  828. static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
  829. {
  830. dma_cap_mask_t mask;
  831. int ret;
  832. espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
  833. if (!espi->zeropage)
  834. return -ENOMEM;
  835. dma_cap_zero(mask);
  836. dma_cap_set(DMA_SLAVE, mask);
  837. espi->dma_rx_data.port = EP93XX_DMA_SSP;
  838. espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
  839. espi->dma_rx_data.name = "ep93xx-spi-rx";
  840. espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  841. &espi->dma_rx_data);
  842. if (!espi->dma_rx) {
  843. ret = -ENODEV;
  844. goto fail_free_page;
  845. }
  846. espi->dma_tx_data.port = EP93XX_DMA_SSP;
  847. espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
  848. espi->dma_tx_data.name = "ep93xx-spi-tx";
  849. espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  850. &espi->dma_tx_data);
  851. if (!espi->dma_tx) {
  852. ret = -ENODEV;
  853. goto fail_release_rx;
  854. }
  855. return 0;
  856. fail_release_rx:
  857. dma_release_channel(espi->dma_rx);
  858. espi->dma_rx = NULL;
  859. fail_free_page:
  860. free_page((unsigned long)espi->zeropage);
  861. return ret;
  862. }
  863. static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
  864. {
  865. if (espi->dma_rx) {
  866. dma_release_channel(espi->dma_rx);
  867. sg_free_table(&espi->rx_sgt);
  868. }
  869. if (espi->dma_tx) {
  870. dma_release_channel(espi->dma_tx);
  871. sg_free_table(&espi->tx_sgt);
  872. }
  873. if (espi->zeropage)
  874. free_page((unsigned long)espi->zeropage);
  875. }
  876. static int ep93xx_spi_probe(struct platform_device *pdev)
  877. {
  878. struct spi_master *master;
  879. struct ep93xx_spi_info *info;
  880. struct ep93xx_spi *espi;
  881. struct resource *res;
  882. int irq;
  883. int error;
  884. info = pdev->dev.platform_data;
  885. master = spi_alloc_master(&pdev->dev, sizeof(*espi));
  886. if (!master) {
  887. dev_err(&pdev->dev, "failed to allocate spi master\n");
  888. return -ENOMEM;
  889. }
  890. master->setup = ep93xx_spi_setup;
  891. master->transfer = ep93xx_spi_transfer;
  892. master->cleanup = ep93xx_spi_cleanup;
  893. master->bus_num = pdev->id;
  894. master->num_chipselect = info->num_chipselect;
  895. master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
  896. master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
  897. platform_set_drvdata(pdev, master);
  898. espi = spi_master_get_devdata(master);
  899. espi->clk = clk_get(&pdev->dev, NULL);
  900. if (IS_ERR(espi->clk)) {
  901. dev_err(&pdev->dev, "unable to get spi clock\n");
  902. error = PTR_ERR(espi->clk);
  903. goto fail_release_master;
  904. }
  905. spin_lock_init(&espi->lock);
  906. init_completion(&espi->wait);
  907. /*
  908. * Calculate maximum and minimum supported clock rates
  909. * for the controller.
  910. */
  911. espi->max_rate = clk_get_rate(espi->clk) / 2;
  912. espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
  913. espi->pdev = pdev;
  914. irq = platform_get_irq(pdev, 0);
  915. if (irq < 0) {
  916. error = -EBUSY;
  917. dev_err(&pdev->dev, "failed to get irq resources\n");
  918. goto fail_put_clock;
  919. }
  920. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  921. if (!res) {
  922. dev_err(&pdev->dev, "unable to get iomem resource\n");
  923. error = -ENODEV;
  924. goto fail_put_clock;
  925. }
  926. espi->sspdr_phys = res->start + SSPDR;
  927. espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
  928. if (IS_ERR(espi->regs_base)) {
  929. error = PTR_ERR(espi->regs_base);
  930. goto fail_put_clock;
  931. }
  932. error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
  933. 0, "ep93xx-spi", espi);
  934. if (error) {
  935. dev_err(&pdev->dev, "failed to request irq\n");
  936. goto fail_put_clock;
  937. }
  938. if (info->use_dma && ep93xx_spi_setup_dma(espi))
  939. dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
  940. espi->wq = create_singlethread_workqueue("ep93xx_spid");
  941. if (!espi->wq) {
  942. dev_err(&pdev->dev, "unable to create workqueue\n");
  943. error = -ENOMEM;
  944. goto fail_free_dma;
  945. }
  946. INIT_WORK(&espi->msg_work, ep93xx_spi_work);
  947. INIT_LIST_HEAD(&espi->msg_queue);
  948. espi->running = true;
  949. /* make sure that the hardware is disabled */
  950. ep93xx_spi_write_u8(espi, SSPCR1, 0);
  951. error = spi_register_master(master);
  952. if (error) {
  953. dev_err(&pdev->dev, "failed to register SPI master\n");
  954. goto fail_free_queue;
  955. }
  956. dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
  957. (unsigned long)res->start, irq);
  958. return 0;
  959. fail_free_queue:
  960. destroy_workqueue(espi->wq);
  961. fail_free_dma:
  962. ep93xx_spi_release_dma(espi);
  963. fail_put_clock:
  964. clk_put(espi->clk);
  965. fail_release_master:
  966. spi_master_put(master);
  967. return error;
  968. }
/**
 * ep93xx_spi_remove() - platform driver remove
 * @pdev: platform device being removed
 *
 * Stops the worker, flushes any still-queued messages with %-ESHUTDOWN,
 * releases DMA resources and the clock, and unregisters the master.
 *
 * Returns: always 0.
 */
static int ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	/*
	 * Clear 'running' first so the worker (and, presumably, the transfer
	 * entry point — verify it checks this flag) stops accepting work,
	 * then wait for any in-flight work item to finish.
	 */
	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		/*
		 * The completion callback may sleep or re-enter the driver,
		 * so it must be invoked with the lock dropped; retake the
		 * lock before re-examining the queue.
		 */
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);
	clk_put(espi->clk);

	spi_unregister_master(master);
	return 0;
}
/* Platform driver glue; module init/exit handled by module_platform_driver(). */
static struct platform_driver ep93xx_spi_driver = {
	.driver = {
		.name = "ep93xx-spi",
		.owner = THIS_MODULE,
	},
	.probe = ep93xx_spi_probe,
	.remove = ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");