spi-rspi.c

/*
 * SH RSPI driver
 *
 * Copyright (C) 2012 Renesas Solutions Corp.
 *
 * Based on spi-sh.c:
 * Copyright (C) 2011 Renesas Solutions Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>

#define RSPI_SPCR               0x00
#define RSPI_SSLP               0x01
#define RSPI_SPPCR              0x02
#define RSPI_SPSR               0x03
#define RSPI_SPDR               0x04
#define RSPI_SPSCR              0x08
#define RSPI_SPSSR              0x09
#define RSPI_SPBR               0x0a
#define RSPI_SPDCR              0x0b
#define RSPI_SPCKD              0x0c
#define RSPI_SSLND              0x0d
#define RSPI_SPND               0x0e
#define RSPI_SPCR2              0x0f
#define RSPI_SPCMD0             0x10
#define RSPI_SPCMD1             0x12
#define RSPI_SPCMD2             0x14
#define RSPI_SPCMD3             0x16
#define RSPI_SPCMD4             0x18
#define RSPI_SPCMD5             0x1a
#define RSPI_SPCMD6             0x1c
#define RSPI_SPCMD7             0x1e

/* qspi only */
#define QSPI_SPBFCR             0x18
#define QSPI_SPBDCR             0x1a
#define QSPI_SPBMUL0            0x1c
#define QSPI_SPBMUL1            0x20
#define QSPI_SPBMUL2            0x24
#define QSPI_SPBMUL3             0x28

/* SPCR */
#define SPCR_SPRIE              0x80
#define SPCR_SPE                0x40
#define SPCR_SPTIE              0x20
#define SPCR_SPEIE              0x10
#define SPCR_MSTR               0x08
#define SPCR_MODFEN             0x04
#define SPCR_TXMD               0x02
#define SPCR_SPMS               0x01

/* SSLP */
#define SSLP_SSL1P              0x02
#define SSLP_SSL0P              0x01

/* SPPCR */
#define SPPCR_MOIFE             0x20
#define SPPCR_MOIFV             0x10
#define SPPCR_SPOM              0x04
#define SPPCR_SPLP2             0x02
#define SPPCR_SPLP              0x01

/* SPSR */
#define SPSR_SPRF               0x80
#define SPSR_SPTEF              0x20
#define SPSR_PERF               0x08
#define SPSR_MODF               0x04
#define SPSR_IDLNF              0x02
#define SPSR_OVRF               0x01

/* SPSCR */
#define SPSCR_SPSLN_MASK        0x07

/* SPSSR */
#define SPSSR_SPECM_MASK        0x70
#define SPSSR_SPCP_MASK         0x07

/* SPDCR */
#define SPDCR_SPLW              0x20
#define SPDCR_SPRDTD            0x10
#define SPDCR_SLSEL1            0x08
#define SPDCR_SLSEL0            0x04
#define SPDCR_SLSEL_MASK        0x0c
#define SPDCR_SPFC1             0x02
#define SPDCR_SPFC0             0x01

/* SPCKD */
#define SPCKD_SCKDL_MASK        0x07

/* SSLND */
#define SSLND_SLNDL_MASK        0x07

/* SPND */
#define SPND_SPNDL_MASK         0x07

/* SPCR2 */
#define SPCR2_PTE               0x08
#define SPCR2_SPIE              0x04
#define SPCR2_SPOE              0x02
#define SPCR2_SPPE              0x01

/* SPCMDn */
#define SPCMD_SCKDEN            0x8000
#define SPCMD_SLNDEN            0x4000
#define SPCMD_SPNDEN            0x2000
#define SPCMD_LSBF              0x1000
#define SPCMD_SPB_MASK          0x0f00
#define SPCMD_SPB_8_TO_16(bit)  (((bit - 1) << 8) & SPCMD_SPB_MASK)
#define SPCMD_SPB_8BIT          0x0000  /* qspi only */
#define SPCMD_SPB_16BIT         0x0100
#define SPCMD_SPB_20BIT         0x0000
#define SPCMD_SPB_24BIT         0x0100
#define SPCMD_SPB_32BIT         0x0200
#define SPCMD_SSLKP             0x0080
#define SPCMD_SSLA_MASK         0x0030
#define SPCMD_BRDV_MASK         0x000c
#define SPCMD_CPOL              0x0002
#define SPCMD_CPHA              0x0001

/* SPBFCR */
#define SPBFCR_TXRST            0x80    /* qspi only */
#define SPBFCR_RXRST            0x40    /* qspi only */

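/*
 * Per-controller driver state.  One instance is allocated as the
 * spi_master's driver data in rspi_probe().
 */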
struct rspi_data {
        void __iomem *addr;
        u32 max_speed_hz;
        struct spi_master *master;
        struct list_head queue;
        struct work_struct ws;
        wait_queue_head_t wait;
        spinlock_t lock;
        struct clk *clk;
        unsigned char spsr;
        const struct spi_ops *ops;

        /* for dmaengine */
        struct dma_chan *chan_tx;
        struct dma_chan *chan_rx;
        int irq;

        unsigned dma_width_16bit:1;
        unsigned dma_callbacked:1;
};

static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
{
        iowrite8(data, rspi->addr + offset);
}

static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
{
        iowrite16(data, rspi->addr + offset);
}

static void rspi_write32(struct rspi_data *rspi, u32 data, u16 offset)
{
        iowrite32(data, rspi->addr + offset);
}

static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
{
        return ioread8(rspi->addr + offset);
}

static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
{
        return ioread16(rspi->addr + offset);
}

/* optional functions */
struct spi_ops {
        int (*set_config_register)(struct rspi_data *rspi, int access_size);
        int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg,
                        struct spi_transfer *t);
        int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg,
                           struct spi_transfer *t);
};

/*
 * functions for RSPI
 */
static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
        int spbr;

        /* Sets output mode (CMOS) and MOSI signal (from previous transfer) */
        rspi_write8(rspi, 0x00, RSPI_SPPCR);

        /* Sets transfer bit rate */
        spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

        /* Sets number of frames to be used: 1 frame */
        rspi_write8(rspi, 0x00, RSPI_SPDCR);

        /* Sets RSPCK, SSL, next-access delay value */
        rspi_write8(rspi, 0x00, RSPI_SPCKD);
        rspi_write8(rspi, 0x00, RSPI_SSLND);
        rspi_write8(rspi, 0x00, RSPI_SPND);

        /* Sets parity, interrupt mask */
        rspi_write8(rspi, 0x00, RSPI_SPCR2);

        /* Sets SPCMD */
        rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
                     RSPI_SPCMD0);

        /* Sets RSPI mode */
        rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

        return 0;
}

/*
 * functions for QSPI
 */
static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
        u16 spcmd;
        int spbr;

        /* Sets output mode (CMOS) and MOSI signal (from previous transfer) */
        rspi_write8(rspi, 0x00, RSPI_SPPCR);

        /* Sets transfer bit rate */
        spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz);
        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

        /* Sets number of frames to be used: 1 frame */
        rspi_write8(rspi, 0x00, RSPI_SPDCR);

        /* Sets RSPCK, SSL, next-access delay value */
        rspi_write8(rspi, 0x00, RSPI_SPCKD);
        rspi_write8(rspi, 0x00, RSPI_SSLND);
        rspi_write8(rspi, 0x00, RSPI_SPND);

        /* Data Length Setting */
        if (access_size == 8)
                spcmd = SPCMD_SPB_8BIT;
        else if (access_size == 16)
                spcmd = SPCMD_SPB_16BIT;
        else if (access_size == 32)
                spcmd = SPCMD_SPB_32BIT;

        spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SSLKP | SPCMD_SPNDEN;

        /* Resets transfer data length */
        rspi_write32(rspi, 0, QSPI_SPBMUL0);

        /* Resets transmit and receive buffer */
        rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);

        /* Sets buffer to allow normal operation */
        rspi_write8(rspi, 0x00, QSPI_SPBFCR);

        /* Sets SPCMD */
        rspi_write16(rspi, spcmd, RSPI_SPCMD0);

        /* Enables SPI function in a master mode */
        rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);

        return 0;
}

#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)

static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
{
        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
}

static void rspi_disable_irq(struct rspi_data *rspi, u8 disable)
{
        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
}

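/*
 * Enable the requested interrupt and sleep until rspi_irq() reports the
 * awaited status bit: the IRQ handler latches SPSR into rspi->spsr, masks
 * the interrupt again and wakes this wait queue.
 */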
static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
                                   u8 enable_bit)
{
        int ret;

        rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
        rspi_enable_irq(rspi, enable_bit);
        ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
        if (ret == 0 && !(rspi->spsr & wait_mask))
                return -ETIMEDOUT;

        return 0;
}

static void rspi_assert_ssl(struct rspi_data *rspi)
{
        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
}

static void rspi_negate_ssl(struct rspi_data *rspi)
{
        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
}

static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
                         struct spi_transfer *t)
{
        int remain = t->len;
        u8 *data;

        data = (u8 *)t->tx_buf;
        while (remain > 0) {
                rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
                            RSPI_SPCR);

                if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
                        dev_err(&rspi->master->dev,
                                "%s: tx empty timeout\n", __func__);
                        return -ETIMEDOUT;
                }

                rspi_write16(rspi, *data, RSPI_SPDR);
                data++;
                remain--;
        }

        /* Wait for the last transmission to complete */
        rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);

        return 0;
}

static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
                         struct spi_transfer *t)
{
        int remain = t->len;
        u8 *data;

        rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR);
        rspi_write8(rspi, 0x00, QSPI_SPBFCR);

        data = (u8 *)t->tx_buf;
        while (remain > 0) {
                if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
                        dev_err(&rspi->master->dev,
                                "%s: tx empty timeout\n", __func__);
                        return -ETIMEDOUT;
                }
                rspi_write8(rspi, *data++, RSPI_SPDR);

                if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
                        dev_err(&rspi->master->dev,
                                "%s: receive timeout\n", __func__);
                        return -ETIMEDOUT;
                }
                rspi_read8(rspi, RSPI_SPDR);

                remain--;
        }

        /* Wait for the last transmission to complete */
        rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);

        return 0;
}

#define send_pio(spi, mesg, t) spi->ops->send_pio(spi, mesg, t)

static void rspi_dma_complete(void *arg)
{
        struct rspi_data *rspi = arg;

        rspi->dma_callbacked = 1;
        wake_up_interruptible(&rspi->wait);
}

static int rspi_dma_map_sg(struct scatterlist *sg, void *buf, unsigned len,
                           struct dma_chan *chan,
                           enum dma_transfer_direction dir)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, buf, len);
        sg_dma_len(sg) = len;
        return dma_map_sg(chan->device->dev, sg, 1, dir);
}

static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
                              enum dma_transfer_direction dir)
{
        dma_unmap_sg(chan->device->dev, sg, 1, dir);
}

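/*
 * Helpers for the 16-bit-wide DMAC case: rspi_memory_to_8bit() widens each
 * byte of the TX buffer into a 16-bit DMAC word, and rspi_memory_from_8bit()
 * narrows the received 16-bit DMAC words back to bytes.
 */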
static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len)
{
        u16 *dst = buf;
        const u8 *src = data;

        while (len) {
                *dst++ = (u16)(*src++);
                len--;
        }
}

static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
{
        u8 *dst = buf;
        const u16 *src = data;

        while (len) {
                *dst++ = (u8)*src++;
                len--;
        }
}

static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
        struct scatterlist sg;
        void *buf = NULL;
        struct dma_async_tx_descriptor *desc;
        unsigned len;
        int ret = 0;

        if (rspi->dma_width_16bit) {
                /*
                 * If the DMAC bus width is 16-bit, the driver allocates a
                 * dummy buffer and converts the original data into DMAC data
                 * in the following format:
                 *   original data: 1st byte, 2nd byte ...
                 *   DMAC data:     1st byte, dummy, 2nd byte, dummy ...
                 */
                len = t->len * 2;
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                rspi_memory_to_8bit(buf, t->tx_buf, t->len);
        } else {
                len = t->len;
                buf = (void *)t->tx_buf;
        }

        if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
                ret = -EFAULT;
                goto end_nomap;
        }
        desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                ret = -EIO;
                goto end;
        }

        /*
         * The DMAC needs SPTIE, but if SPTIE is set, this driver's IRQ
         * routine would be called as well.  So, disable the IRQ while the
         * DMA transfer is in progress.
         */
        disable_irq(rspi->irq);

        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
        rspi_enable_irq(rspi, SPCR_SPTIE);
        rspi->dma_callbacked = 0;

        desc->callback = rspi_dma_complete;
        desc->callback_param = rspi;
        dmaengine_submit(desc);
        dma_async_issue_pending(rspi->chan_tx);

        ret = wait_event_interruptible_timeout(rspi->wait,
                                               rspi->dma_callbacked, HZ);
        if (ret > 0 && rspi->dma_callbacked)
                ret = 0;
        else if (!ret)
                ret = -ETIMEDOUT;
        rspi_disable_irq(rspi, SPCR_SPTIE);

        enable_irq(rspi->irq);

end:
        rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
end_nomap:
        if (rspi->dma_width_16bit)
                kfree(buf);

        return ret;
}

static void rspi_receive_init(struct rspi_data *rspi)
{
        unsigned char spsr;

        spsr = rspi_read8(rspi, RSPI_SPSR);
        if (spsr & SPSR_SPRF)
                rspi_read16(rspi, RSPI_SPDR);   /* dummy read */
        if (spsr & SPSR_OVRF)
                rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
                            RSPI_SPSR);         /* clear the overrun flag */
}

static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
                            struct spi_transfer *t)
{
        int remain = t->len;
        u8 *data;

        rspi_receive_init(rspi);

        data = (u8 *)t->rx_buf;
        while (remain > 0) {
                rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
                            RSPI_SPCR);

                if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
                        dev_err(&rspi->master->dev,
                                "%s: tx empty timeout\n", __func__);
                        return -ETIMEDOUT;
                }
                /* dummy write to generate the clock */
                rspi_write16(rspi, 0x00, RSPI_SPDR);

                if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
                        dev_err(&rspi->master->dev,
                                "%s: receive timeout\n", __func__);
                        return -ETIMEDOUT;
                }
                /* SPDR allows 16 or 32-bit access only */
                *data = (u8)rspi_read16(rspi, RSPI_SPDR);

                data++;
                remain--;
        }

        return 0;
}

static void qspi_receive_init(struct rspi_data *rspi)
{
        unsigned char spsr;

        spsr = rspi_read8(rspi, RSPI_SPSR);
        if (spsr & SPSR_SPRF)
                rspi_read8(rspi, RSPI_SPDR);    /* dummy read */
        rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
        rspi_write8(rspi, 0x00, QSPI_SPBFCR);
}

static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
                            struct spi_transfer *t)
{
        int remain = t->len;
        u8 *data;

        qspi_receive_init(rspi);

        data = (u8 *)t->rx_buf;
        while (remain > 0) {
                if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
                        dev_err(&rspi->master->dev,
                                "%s: tx empty timeout\n", __func__);
                        return -ETIMEDOUT;
                }
                /* dummy write to generate the clock */
                rspi_write8(rspi, 0x00, RSPI_SPDR);

                if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
                        dev_err(&rspi->master->dev,
                                "%s: receive timeout\n", __func__);
                        return -ETIMEDOUT;
                }
                /* SPDR allows 8, 16 or 32-bit access */
                *data++ = rspi_read8(rspi, RSPI_SPDR);

                remain--;
        }

        return 0;
}

#define receive_pio(spi, mesg, t) spi->ops->receive_pio(spi, mesg, t)

static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
        struct scatterlist sg, sg_dummy;
        void *dummy = NULL, *rx_buf = NULL;
        struct dma_async_tx_descriptor *desc, *desc_dummy;
        unsigned len;
        int ret = 0;

        if (rspi->dma_width_16bit) {
                /*
                 * If the DMAC bus width is 16-bit, the driver allocates a
                 * dummy buffer and finally converts the DMAC data back into
                 * the actual data, as in the following format:
                 *   DMAC data:   1st byte, dummy, 2nd byte, dummy ...
                 *   actual data: 1st byte, 2nd byte ...
                 */
                len = t->len * 2;
                rx_buf = kmalloc(len, GFP_KERNEL);
                if (!rx_buf)
                        return -ENOMEM;
        } else {
                len = t->len;
                rx_buf = t->rx_buf;
        }

        /* prepare dummy transfer to generate SPI clocks */
        dummy = kzalloc(len, GFP_KERNEL);
        if (!dummy) {
                ret = -ENOMEM;
                goto end_nomap;
        }
        if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
                             DMA_TO_DEVICE)) {
                ret = -EFAULT;
                goto end_nomap;
        }
        desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
                        DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_dummy) {
                ret = -EIO;
                goto end_dummy_mapped;
        }

        /* prepare receive transfer */
        if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
                             DMA_FROM_DEVICE)) {
                ret = -EFAULT;
                goto end_dummy_mapped;
        }
        desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                ret = -EIO;
                goto end;
        }

        rspi_receive_init(rspi);

        /*
         * The DMAC needs SPTIE, but if SPTIE is set, this driver's IRQ
         * routine would be called as well.  So, disable the IRQ while the
         * DMA transfer is in progress.
         */
        disable_irq(rspi->irq);

        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
        rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
        rspi->dma_callbacked = 0;

        desc->callback = rspi_dma_complete;
        desc->callback_param = rspi;
        dmaengine_submit(desc);
        dma_async_issue_pending(rspi->chan_rx);

        desc_dummy->callback = NULL;    /* No callback */
        dmaengine_submit(desc_dummy);
        dma_async_issue_pending(rspi->chan_tx);

        ret = wait_event_interruptible_timeout(rspi->wait,
                                               rspi->dma_callbacked, HZ);
        if (ret > 0 && rspi->dma_callbacked)
                ret = 0;
        else if (!ret)
                ret = -ETIMEDOUT;
        rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);

        enable_irq(rspi->irq);

end:
        rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
end_dummy_mapped:
        rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
end_nomap:
        if (rspi->dma_width_16bit) {
                if (!ret)
                        rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
                kfree(rx_buf);
        }
        kfree(dummy);

        return ret;
}

static int rspi_is_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
        if (t->tx_buf && rspi->chan_tx)
                return 1;
        /* If the module receives data by DMAC, it also needs TX DMAC */
        if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
                return 1;

        return 0;
}

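/*
 * Message pump: rspi_transfer() queues spi_messages on rspi->queue and
 * schedules this work.  The worker drains the queue, enables the SPI
 * function (SPE) for each message, runs every transfer by DMA or PIO,
 * and then completes the message.
 */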
static void rspi_work(struct work_struct *work)
{
        struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
        struct spi_message *mesg;
        struct spi_transfer *t;
        unsigned long flags;
        int ret;

        while (1) {
                spin_lock_irqsave(&rspi->lock, flags);
                if (list_empty(&rspi->queue)) {
                        spin_unlock_irqrestore(&rspi->lock, flags);
                        break;
                }
                mesg = list_entry(rspi->queue.next, struct spi_message, queue);
                list_del_init(&mesg->queue);
                spin_unlock_irqrestore(&rspi->lock, flags);

                rspi_assert_ssl(rspi);

                list_for_each_entry(t, &mesg->transfers, transfer_list) {
                        if (t->tx_buf) {
                                if (rspi_is_dma(rspi, t))
                                        ret = rspi_send_dma(rspi, t);
                                else
                                        ret = send_pio(rspi, mesg, t);
                                if (ret < 0)
                                        goto error;
                        }
                        if (t->rx_buf) {
                                if (rspi_is_dma(rspi, t))
                                        ret = rspi_receive_dma(rspi, t);
                                else
                                        ret = receive_pio(rspi, mesg, t);
                                if (ret < 0)
                                        goto error;
                        }
                        mesg->actual_length += t->len;
                }
                rspi_negate_ssl(rspi);

                mesg->status = 0;
                mesg->complete(mesg->context);
        }

        return;

error:
        mesg->status = ret;
        mesg->complete(mesg->context);
}

static int rspi_setup(struct spi_device *spi)
{
        struct rspi_data *rspi = spi_master_get_devdata(spi->master);

        if (!spi->bits_per_word)
                spi->bits_per_word = 8;
        rspi->max_speed_hz = spi->max_speed_hz;

        set_config_register(rspi, 8);

        return 0;
}

static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
{
        struct rspi_data *rspi = spi_master_get_devdata(spi->master);
        unsigned long flags;

        mesg->actual_length = 0;
        mesg->status = -EINPROGRESS;

        spin_lock_irqsave(&rspi->lock, flags);
        list_add_tail(&mesg->queue, &rspi->queue);
        schedule_work(&rspi->ws);
        spin_unlock_irqrestore(&rspi->lock, flags);

        return 0;
}

static void rspi_cleanup(struct spi_device *spi)
{
}

static irqreturn_t rspi_irq(int irq, void *_sr)
{
        struct rspi_data *rspi = (struct rspi_data *)_sr;
        unsigned long spsr;
        irqreturn_t ret = IRQ_NONE;
        unsigned char disable_irq = 0;

        rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
        if (spsr & SPSR_SPRF)
                disable_irq |= SPCR_SPRIE;
        if (spsr & SPSR_SPTEF)
                disable_irq |= SPCR_SPTIE;

        if (disable_irq) {
                ret = IRQ_HANDLED;
                rspi_disable_irq(rspi, disable_irq);
                wake_up(&rspi->wait);
        }

        return ret;
}

static int rspi_request_dma(struct rspi_data *rspi,
                            struct platform_device *pdev)
{
        struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dma_cap_mask_t mask;
        struct dma_slave_config cfg;
        int ret;

        if (!res || !rspi_pd)
                return 0;       /* The driver assumes no error. */

        rspi->dma_width_16bit = rspi_pd->dma_width_16bit;

        /* If the module receives data by DMAC, it also needs TX DMAC */
        if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);
                rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
                                                    (void *)rspi_pd->dma_rx_id);
                if (rspi->chan_rx) {
                        cfg.slave_id = rspi_pd->dma_rx_id;
                        cfg.direction = DMA_DEV_TO_MEM;
                        cfg.dst_addr = 0;
                        cfg.src_addr = res->start + RSPI_SPDR;
                        ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
                        if (!ret)
                                dev_info(&pdev->dev, "Use DMA when rx.\n");
                        else
                                return ret;
                }
        }
        if (rspi_pd->dma_tx_id) {
                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);
                rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
                                                    (void *)rspi_pd->dma_tx_id);
                if (rspi->chan_tx) {
                        cfg.slave_id = rspi_pd->dma_tx_id;
                        cfg.direction = DMA_MEM_TO_DEV;
                        cfg.dst_addr = res->start + RSPI_SPDR;
                        cfg.src_addr = 0;
                        ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
                        if (!ret)
                                dev_info(&pdev->dev, "Use DMA when tx.\n");
                        else
                                return ret;
                }
        }

        return 0;
}

static void rspi_release_dma(struct rspi_data *rspi)
{
        if (rspi->chan_tx)
                dma_release_channel(rspi->chan_tx);
        if (rspi->chan_rx)
                dma_release_channel(rspi->chan_rx);
}

static int rspi_remove(struct platform_device *pdev)
{
        struct rspi_data *rspi = spi_master_get(platform_get_drvdata(pdev));

        spi_unregister_master(rspi->master);
        rspi_release_dma(rspi);
        free_irq(platform_get_irq(pdev, 0), rspi);
        clk_put(rspi->clk);
        iounmap(rspi->addr);
        spi_master_put(rspi->master);

        return 0;
}

static int rspi_probe(struct platform_device *pdev)
{
        struct resource *res;
        struct spi_master *master;
        struct rspi_data *rspi;
        int ret, irq;
        char clk_name[16];
        struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
        const struct spi_ops *ops;
        const struct platform_device_id *id_entry = pdev->id_entry;

        ops = (struct spi_ops *)id_entry->driver_data;
        /* ops parameter check */
        if (!ops->set_config_register) {
                dev_err(&pdev->dev, "there is no set_config_register\n");
                return -ENODEV;
        }
        /* get base addr */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (unlikely(res == NULL)) {
                dev_err(&pdev->dev, "invalid resource\n");
                return -EINVAL;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "platform_get_irq error\n");
                return -ENODEV;
        }

        master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
        if (master == NULL) {
                dev_err(&pdev->dev, "spi_alloc_master error.\n");
                return -ENOMEM;
        }

        rspi = spi_master_get_devdata(master);
        platform_set_drvdata(pdev, rspi);
        rspi->ops = ops;
        rspi->master = master;
        rspi->addr = ioremap(res->start, resource_size(res));
        if (rspi->addr == NULL) {
                dev_err(&pdev->dev, "ioremap error.\n");
                ret = -ENOMEM;
                goto error1;
        }

        snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
        rspi->clk = clk_get(&pdev->dev, clk_name);
        if (IS_ERR(rspi->clk)) {
                dev_err(&pdev->dev, "cannot get clock\n");
                ret = PTR_ERR(rspi->clk);
                goto error2;
        }
        clk_enable(rspi->clk);

        INIT_LIST_HEAD(&rspi->queue);
        spin_lock_init(&rspi->lock);
        INIT_WORK(&rspi->ws, rspi_work);
        init_waitqueue_head(&rspi->wait);

        master->num_chipselect = rspi_pd->num_chipselect;
        if (!master->num_chipselect)
                master->num_chipselect = 2;     /* default */

        master->bus_num = pdev->id;
        master->setup = rspi_setup;
        master->transfer = rspi_transfer;
        master->cleanup = rspi_cleanup;

        ret = request_irq(irq, rspi_irq, 0, dev_name(&pdev->dev), rspi);
        if (ret < 0) {
                dev_err(&pdev->dev, "request_irq error\n");
                goto error3;
        }

        rspi->irq = irq;
        ret = rspi_request_dma(rspi, pdev);
        if (ret < 0) {
                dev_err(&pdev->dev, "rspi_request_dma failed.\n");
                goto error4;
        }

        ret = spi_register_master(master);
        if (ret < 0) {
                dev_err(&pdev->dev, "spi_register_master error.\n");
                goto error4;
        }

        dev_info(&pdev->dev, "probed\n");

        return 0;

error4:
        rspi_release_dma(rspi);
        free_irq(irq, rspi);
error3:
        clk_put(rspi->clk);
error2:
        iounmap(rspi->addr);
error1:
        spi_master_put(master);

        return ret;
}

static struct spi_ops rspi_ops = {
        .set_config_register = rspi_set_config_register,
        .send_pio = rspi_send_pio,
        .receive_pio = rspi_receive_pio,
};

static struct spi_ops qspi_ops = {
        .set_config_register = qspi_set_config_register,
        .send_pio = qspi_send_pio,
        .receive_pio = qspi_receive_pio,
};

static struct platform_device_id spi_driver_ids[] = {
        { "rspi", (kernel_ulong_t)&rspi_ops },
        { "qspi", (kernel_ulong_t)&qspi_ops },
        {},
};

MODULE_DEVICE_TABLE(platform, spi_driver_ids);

static struct platform_driver rspi_driver = {
        .probe = rspi_probe,
        .remove = rspi_remove,
        .id_table = spi_driver_ids,
        .driver = {
                .name = "renesas_spi",
                .owner = THIS_MODULE,
        },
};
module_platform_driver(rspi_driver);

MODULE_DESCRIPTION("Renesas RSPI bus driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:rspi");
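
/*
 * Illustrative board-side hookup (a minimal sketch, not part of this file).
 * RSPI0_BASE, RSPI0_SIZE and RSPI0_IRQ are hypothetical per-SoC values; the
 * platform_data fields shown are the ones this driver dereferences.  Leaving
 * dma_tx_id/dma_rx_id unset keeps the driver on the PIO path, and the clock
 * is looked up as "<device name><id>", e.g. "rspi0".
 *
 *      static struct rspi_plat_data rspi0_pdata = {
 *              .num_chipselect = 1,
 *      };
 *
 *      static struct resource rspi0_resources[] = {
 *              DEFINE_RES_MEM(RSPI0_BASE, RSPI0_SIZE),
 *              DEFINE_RES_IRQ(RSPI0_IRQ),
 *      };
 *
 *      static struct platform_device rspi0_device = {
 *              .name = "rspi",
 *              .id = 0,
 *              .dev = {
 *                      .platform_data = &rspi0_pdata,
 *              },
 *              .resource = rspi0_resources,
 *              .num_resources = ARRAY_SIZE(rspi0_resources),
 *      };
 */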