/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <linux/platform_data/dma-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>

#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)

#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5

/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is the FIFO (0..%SPI_FIFO_SIZE). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only a TX buffer is passed
 *            in by the client
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, the driver accepts transfer requests from protocol
 * drivers. @current_msg holds a pointer to the message that is currently
 * processed. If @current_msg is %NULL, no processing is going on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
	spinlock_t			lock;
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	unsigned long			min_rate;
	unsigned long			max_rate;
	bool				running;
	struct workqueue_struct		*wq;
	struct work_struct		msg_work;
	struct completion		wait;
	struct list_head		msg_queue;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};

/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @ops: private chip operations
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	u8				div_cpsr;
	u8				div_scr;
	struct ep93xx_spi_chip_ops	*ops;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)
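/* e.g. 8 bits per word -> DSS = 7, 16 bits per word -> DSS = 15 */
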
static void ep93xx_spi_write_u8(const struct ep93xx_spi *espi,
				u16 reg, u8 value)
{
	writeb(value, espi->regs_base + reg);
}

static u8 ep93xx_spi_read_u8(const struct ep93xx_spi *espi, u16 reg)
{
	return readb(espi->regs_base + reg);
}

static void ep93xx_spi_write_u16(const struct ep93xx_spi *espi,
				 u16 reg, u16 value)
{
	writew(value, espi->regs_base + reg);
}

static u16 ep93xx_spi_read_u16(const struct ep93xx_spi *espi, u16 reg)
{
	return readw(espi->regs_base + reg);
}

static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
	u8 regval;
	int err;

	err = clk_enable(espi->clk);
	if (err)
		return err;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	return 0;
}

static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	clk_disable(espi->clk);
}

static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @chip: divisors are calculated for this chip
 * @rate: desired SPI output clock rate
 *
 * Function calculates cpsr (clock pre-scaler) and scr divisors based on
 * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
 * for some reason, divisors cannot be calculated nothing is stored and
 * %-EINVAL is returned.
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    struct ep93xx_spi_chip *chip,
				    unsigned long rate)
{
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that the requested rate is within the range supported
	 * by the controller. Note that the minimum value is already checked
	 * in ep93xx_spi_transfer().
	 */
	rate = clamp(rate, espi->min_rate, espi->max_rate);

	/*
	 * Calculate the divisors so that we get a rate according to the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be an even number in the range 2..254 and scr can be
	 * any number between 0 and 255.
	 */
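	/*
	 * Worked example (hypothetical clock): with spi_clk_rate of
	 * 14.7456 MHz and a requested rate of 1 MHz, the loop below first
	 * matches at cpsr = 2, scr = 7, giving
	 * 14745600 / (2 * 8) = 921600 Hz, the fastest rate not above the
	 * request.
	 */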
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				chip->div_scr = (u8)scr;
				chip->div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}

static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
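	/*
	 * @control true means "select the device": drive the chip select
	 * line to its active level, which is high only when SPI_CS_HIGH
	 * is set in the device's mode.
	 */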
	int value = (spi->mode & SPI_CS_HIGH) ? control : !control;

	if (chip->ops && chip->ops->cs_control)
		chip->ops->cs_control(spi, value);
}

/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up the SPI device mode, speed, etc. It can be called
 * multiple times for a single device. Returns %0 in case of success,
 * negative error in case of failure. When this function returns success,
 * the device is deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (!chip) {
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);
			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	ep93xx_spi_cs_control(spi, false);
	return 0;
}

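/*
 * A board that routes the chip select to an ordinary GPIO can pass
 * ep93xx_spi_chip_ops through spi_board_info.controller_data, which ends up
 * in spi->controller_data above. A minimal sketch (hypothetical board code;
 * BOARD_CS_GPIO is made up):
 *
 *	static int board_cs_setup(struct spi_device *spi)
 *	{
 *		return gpio_request_one(BOARD_CS_GPIO, GPIOF_OUT_INIT_HIGH,
 *					spi->modalias);
 *	}
 *
 *	static void board_cs_cleanup(struct spi_device *spi)
 *	{
 *		gpio_free(BOARD_CS_GPIO);
 *	}
 *
 *	static void board_cs_control(struct spi_device *spi, int value)
 *	{
 *		gpio_set_value(BOARD_CS_GPIO, value);
 *	}
 *
 *	static struct ep93xx_spi_chip_ops board_spi_ops = {
 *		.setup		= board_cs_setup,
 *		.cleanup	= board_cs_cleanup,
 *		.cs_control	= board_cs_control,
 *	};
 */
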
/**
 * ep93xx_spi_transfer() - queue message to be transferred
 * @spi: target SPI device
 * @msg: message to be transferred
 *
 * This function is called by SPI device drivers when they are going to
 * transfer a new message. It simply puts the message in the queue and
 * schedules the workqueue to perform the actual transfer later on.
 *
 * Returns %0 on success and negative error in case of failure.
 */
static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct spi_transfer *t;
	unsigned long flags;

	if (!msg || !msg->complete)
		return -EINVAL;

	/* first validate each transfer */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (t->speed_hz && t->speed_hz < espi->min_rate)
			return -EINVAL;
	}

	/*
	 * Now that we own the message, let's initialize it so that it is
	 * suitable for us. We use @msg->status to signal whether there was
	 * an error in the transfer and @msg->state to hold a pointer to the
	 * current transfer (or %NULL if there is no active current transfer).
	 */
	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	spin_lock_irqsave(&espi->lock, flags);
	if (!espi->running) {
		spin_unlock_irqrestore(&espi->lock, flags);
		return -ESHUTDOWN;
	}
	list_add_tail(&msg->queue, &espi->msg_queue);
	queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irqrestore(&espi->lock, flags);

	return 0;
}

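/*
 * A protocol driver normally reaches ep93xx_spi_transfer() through the SPI
 * core rather than calling it directly, roughly like this (hypothetical
 * client code):
 *
 *	struct spi_transfer t = {
 *		.tx_buf	= buf,
 *		.len	= len,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	ret = spi_sync(spi, &m);
 *
 * spi_sync() blocks; through spi_async() the core eventually invokes
 * master->transfer(), i.e. this function.
 */
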
/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * This function releases master controller specific state for given @spi
 * device.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (chip) {
		if (chip->ops && chip->ops->cleanup)
			chip->ops->cleanup(spi);
		spi_set_ctldata(spi, NULL);
		kfree(chip);
	}
}

/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 * @bits_per_word: transfer bits_per_word
 *
 * This function sets up the actual hardware registers with settings given in
 * @chip. Note that no validation is done so make sure that callers validate
 * settings before calling this.
 */
static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
				  const struct ep93xx_spi_chip *chip,
				  u8 bits_per_word)
{
	u8 dss = bits_per_word_to_dss(bits_per_word);
	u16 cr0;

	cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (chip->spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= dss;

	dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		chip->spi->mode, chip->div_cpsr, chip->div_scr, dss);
	dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);

	ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
	ep93xx_spi_write_u16(espi, SSPCR0, cr0);
}

static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (t->bits_per_word > 8) {
		u16 tx_val = 0;

		if (t->tx_buf)
			tx_val = *(const u16 *)(t->tx_buf + espi->tx);
		ep93xx_spi_write_u16(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	} else {
		u8 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((const u8 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u8(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	}
}

static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (t->bits_per_word > 8) {
		u16 rx_val;

		rx_val = ep93xx_spi_read_u16(espi, SSPDR);
		if (t->rx_buf)
			*(u16 *)(t->rx_buf + espi->rx) = rx_val;
		espi->rx += sizeof(rx_val);
	} else {
		u8 rx_val;

		rx_val = ep93xx_spi_read_u8(espi, SSPDR);
		if (t->rx_buf)
			((u8 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers the next bytes (or half-words) to/from the RX/TX
 * FIFOs. If called several times, the whole transfer will be completed.
 * Returns %-EINPROGRESS when the current transfer is not yet completed,
 * otherwise %0.
 *
 * When this function is finished, the RX FIFO should be empty and the TX
 * FIFO should be full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}

static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
{
	/*
	 * Now everything is set up for the current transfer. We prime the TX
	 * FIFO, enable interrupts, and wait for the transfer to complete.
	 */
	if (ep93xx_spi_read_write(espi)) {
		ep93xx_spi_enable_interrupts(espi);
		wait_for_completion(&espi->wait);
	}
}

/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * This function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and an
 * ERR_PTR in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	if (t->bits_per_word > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */
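	/*
	 * Chunking arithmetic, for illustration: with 4 KiB pages, a
	 * hypothetical 10000-byte transfer needs
	 * nents = DIV_ROUND_UP(10000, 4096) = 3 entries of
	 * 4096 + 4096 + 1808 bytes.
	 */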
	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}

/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * This function finishes with the DMA transfer. After this, the DMA buffer is
 * unmapped.
 */
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
				  enum dma_transfer_direction dir)
{
	struct dma_chan *chan;
	struct sg_table *sgt;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}

static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
	if (IS_ERR(rxd)) {
		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		msg->status = PTR_ERR(rxd);
		return;
	}

	txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		msg->status = PTR_ERR(txd);
		return;
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = &espi->wait;

	/* Now submit both descriptors and wait while they finish */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	wait_for_completion(&espi->wait);

	ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
	ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
}

/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. It waits until the
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * the transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
	int err;

	msg->state = t;

	err = ep93xx_spi_calc_divisors(espi, chip, t->speed_hz);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to adjust speed\n");
		msg->status = err;
		return;
	}

	ep93xx_spi_chip_setup(espi, chip, t->bits_per_word);

	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point in setting up DMA for transfers which will fit
	 * into the FIFO and can be transferred with a single interrupt. So
	 * in these cases we use PIO and don't bother with DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case the protocol driver is asking us to drop
			 * the chipselect briefly, we let the scheduler
			 * handle any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}
}

/**
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case
 * of failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			ep93xx_spi_disable(espi);
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using the %SSPSR_TNF bit which may cause RX FIFO
	 * overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Assert the chipselect.
	 */
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}

#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. The message is taken off the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After a message is transferred, the protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to a negative error
 * number, otherwise it contains zero (and @msg->actual_length is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	spin_lock_irq(&espi->lock);
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got the ROR (receive overrun) interrupt we know that something
	 * is wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute the next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In the normal case there is still some processing
			 * left for the current transfer. Let's wait for the
			 * next interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * The current transfer is finished, either with error or with success.
	 * In any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}

static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
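	/*
	 * On the EP93xx the SSP is served by the M2M DMA channels (the M2P
	 * channels drive other peripherals), so reject M2P channels here.
	 */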
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}

static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}

static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}

static int ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int irq;
	int error;

	info = pdev->dev.platform_data;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resources\n");
		return -EBUSY;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master)
		return -ENOMEM;

	master->setup = ep93xx_spi_setup;
	master->transfer = ep93xx_spi_transfer;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	spin_lock_init(&espi->lock);
	init_completion(&espi->wait);

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
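	/*
	 * In the divisor formula rate = clk / (cpsr * (1 + scr)) the extremes
	 * are cpsr = 2, scr = 0 (fastest) and cpsr = 254, scr = 255 (slowest),
	 * hence the two constants below.
	 */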
	espi->max_rate = clk_get_rate(espi->clk) / 2;
	espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
	espi->pdev = pdev;

	espi->sspdr_phys = res->start + SSPDR;

	espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(espi->regs_base)) {
		error = PTR_ERR(espi->regs_base);
		goto fail_release_master;
	}

	error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
				 0, "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_release_master;
	}

	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	espi->wq = create_singlethread_workqueue("ep93xx_spid");
	if (!espi->wq) {
		dev_err(&pdev->dev, "unable to create workqueue\n");
		error = -ENOMEM;
		goto fail_free_dma;
	}
	INIT_WORK(&espi->msg_work, ep93xx_spi_work);
	INIT_LIST_HEAD(&espi->msg_queue);
	espi->running = true;

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = spi_register_master(master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_queue;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, irq);

	return 0;

fail_free_queue:
	destroy_workqueue(espi->wq);
fail_free_dma:
	ep93xx_spi_release_dma(espi);
fail_release_master:
	spi_master_put(master);

	return error;
}

static int ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);

	spi_unregister_master(master);
	return 0;
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");