/*
 * SPI bus driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */
  8. #include <linux/module.h>
  9. #include <linux/kernel.h>
  10. #include <linux/slab.h>
  11. #include <linux/clk.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/io.h>
  14. #include <linux/of.h>
  15. #include <linux/bitops.h>
  16. #include <linux/err.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/of_gpio.h>
  19. #include <linux/spi/spi.h>
  20. #include <linux/spi/spi_bitbang.h>
  21. #include <linux/dmaengine.h>
  22. #include <linux/dma-direction.h>
  23. #include <linux/dma-mapping.h>
  24. #include <linux/sirfsoc_dma.h>
#define DRIVER_NAME "sirfsoc_spi"

/* Controller register map: byte offsets from the mapped register base */
#define SIRFSOC_SPI_CTRL 0x0000
#define SIRFSOC_SPI_CMD 0x0004
#define SIRFSOC_SPI_TX_RX_EN 0x0008
#define SIRFSOC_SPI_INT_EN 0x000C
#define SIRFSOC_SPI_INT_STATUS 0x0010
#define SIRFSOC_SPI_TX_DMA_IO_CTRL 0x0100
#define SIRFSOC_SPI_TX_DMA_IO_LEN 0x0104
#define SIRFSOC_SPI_TXFIFO_CTRL 0x0108
#define SIRFSOC_SPI_TXFIFO_LEVEL_CHK 0x010C
#define SIRFSOC_SPI_TXFIFO_OP 0x0110
#define SIRFSOC_SPI_TXFIFO_STATUS 0x0114
#define SIRFSOC_SPI_TXFIFO_DATA 0x0118
#define SIRFSOC_SPI_RX_DMA_IO_CTRL 0x0120
#define SIRFSOC_SPI_RX_DMA_IO_LEN 0x0124
#define SIRFSOC_SPI_RXFIFO_CTRL 0x0128
#define SIRFSOC_SPI_RXFIFO_LEVEL_CHK 0x012C
#define SIRFSOC_SPI_RXFIFO_OP 0x0130
#define SIRFSOC_SPI_RXFIFO_STATUS 0x0134
#define SIRFSOC_SPI_RXFIFO_DATA 0x0138
#define SIRFSOC_SPI_DUMMY_DELAY_CTL 0x0144
/* SPI CTRL register defines */
#define SIRFSOC_SPI_SLV_MODE BIT(16)
#define SIRFSOC_SPI_CMD_MODE BIT(17)
#define SIRFSOC_SPI_CS_IO_OUT BIT(18)
#define SIRFSOC_SPI_CS_IO_MODE BIT(19)
#define SIRFSOC_SPI_CLK_IDLE_STAT BIT(20)
#define SIRFSOC_SPI_CS_IDLE_STAT BIT(21)
#define SIRFSOC_SPI_TRAN_MSB BIT(22)
#define SIRFSOC_SPI_DRV_POS_EDGE BIT(23)
#define SIRFSOC_SPI_CS_HOLD_TIME BIT(24)
#define SIRFSOC_SPI_CLK_SAMPLE_MODE BIT(25)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8 (0 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12 (1 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16 (2 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32 (3 << 26)
/* NOTE(review): argument is unparenthesized; harmless for the literal
 * arguments used today, but wrap (x) if this macro gains callers. */
#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28)
#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30)
#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31)
/* Interrupt Enable */
#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0)
#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1)
#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2)
#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA_INT_EN BIT(4)
#define SIRFSOC_SPI_TX_IO_DMA_INT_EN BIT(5)
#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_INT_EN BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)
#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF
/* Interrupt status */
#define SIRFSOC_SPI_RX_DONE BIT(0)
#define SIRFSOC_SPI_TX_DONE BIT(1)
#define SIRFSOC_SPI_RX_OFLOW BIT(2)
#define SIRFSOC_SPI_TX_UFLOW BIT(3)
#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_REACH BIT(9)
#define SIRFSOC_SPI_FRM_END BIT(10)
/* TX RX enable */
#define SIRFSOC_SPI_RX_EN BIT(0)
#define SIRFSOC_SPI_TX_EN BIT(1)
#define SIRFSOC_SPI_CMD_TX_EN BIT(2)
/* RX/TX_DMA_IO_CTRL register bits */
#define SIRFSOC_SPI_IO_MODE_SEL BIT(0)
#define SIRFSOC_SPI_RX_DMA_FLUSH BIT(2)
/* FIFO OPs */
#define SIRFSOC_SPI_FIFO_RESET BIT(0)
#define SIRFSOC_SPI_FIFO_START BIT(1)
/* FIFO CTRL */
#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0)
/* FIFO Status (RX/TXFIFO_STATUS registers, not the interrupt status) */
#define SIRFSOC_SPI_FIFO_LEVEL_MASK 0xFF
#define SIRFSOC_SPI_FIFO_FULL BIT(8)
#define SIRFSOC_SPI_FIFO_EMPTY BIT(9)
/* 256 bytes rx/tx FIFO */
#define SIRFSOC_SPI_FIFO_SIZE 256
#define SIRFSOC_SPI_DAT_FRM_LEN_MAX (64 * 1024)
/* FIFO level-check fields: stop/low/high watermarks and threshold */
#define SIRFSOC_SPI_FIFO_SC(x) ((x) & 0x3F)
#define SIRFSOC_SPI_FIFO_LC(x) (((x) & 0x3F) << 10)
#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20)
#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2)
  111. /*
  112. * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma
  113. * due to the limitation of dma controller
  114. */
  115. #define ALIGNED(x) (!((u32)x & 0x3))
  116. #define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
  117. ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
  118. struct sirfsoc_spi {
  119. struct spi_bitbang bitbang;
  120. struct completion rx_done;
  121. struct completion tx_done;
  122. void __iomem *base;
  123. u32 ctrl_freq; /* SPI controller clock speed */
  124. struct clk *clk;
  125. /* rx & tx bufs from the spi_transfer */
  126. const void *tx;
  127. void *rx;
  128. /* place received word into rx buffer */
  129. void (*rx_word) (struct sirfsoc_spi *);
  130. /* get word from tx buffer for sending */
  131. void (*tx_word) (struct sirfsoc_spi *);
  132. /* number of words left to be tranmitted/received */
  133. unsigned int left_tx_word;
  134. unsigned int left_rx_word;
  135. /* rx & tx DMA channels */
  136. struct dma_chan *rx_chan;
  137. struct dma_chan *tx_chan;
  138. dma_addr_t src_start;
  139. dma_addr_t dst_start;
  140. void *dummypage;
  141. int word_width; /* in bytes */
  142. int chipselect[0];
  143. };
  144. static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
  145. {
  146. u32 data;
  147. u8 *rx = sspi->rx;
  148. data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
  149. if (rx) {
  150. *rx++ = (u8) data;
  151. sspi->rx = rx;
  152. }
  153. sspi->left_rx_word--;
  154. }
  155. static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
  156. {
  157. u32 data = 0;
  158. const u8 *tx = sspi->tx;
  159. if (tx) {
  160. data = *tx++;
  161. sspi->tx = tx;
  162. }
  163. writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
  164. sspi->left_tx_word--;
  165. }
  166. static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
  167. {
  168. u32 data;
  169. u16 *rx = sspi->rx;
  170. data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
  171. if (rx) {
  172. *rx++ = (u16) data;
  173. sspi->rx = rx;
  174. }
  175. sspi->left_rx_word--;
  176. }
  177. static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
  178. {
  179. u32 data = 0;
  180. const u16 *tx = sspi->tx;
  181. if (tx) {
  182. data = *tx++;
  183. sspi->tx = tx;
  184. }
  185. writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
  186. sspi->left_tx_word--;
  187. }
  188. static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
  189. {
  190. u32 data;
  191. u32 *rx = sspi->rx;
  192. data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
  193. if (rx) {
  194. *rx++ = (u32) data;
  195. sspi->rx = rx;
  196. }
  197. sspi->left_rx_word--;
  198. }
  199. static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
  200. {
  201. u32 data = 0;
  202. const u32 *tx = sspi->tx;
  203. if (tx) {
  204. data = *tx++;
  205. sspi->tx = tx;
  206. }
  207. writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
  208. sspi->left_tx_word--;
  209. }
  210. static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
  211. {
  212. struct sirfsoc_spi *sspi = dev_id;
  213. u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);
  214. writel(spi_stat, sspi->base + SIRFSOC_SPI_INT_STATUS);
  215. /* Error Conditions */
  216. if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
  217. spi_stat & SIRFSOC_SPI_TX_UFLOW) {
  218. complete(&sspi->rx_done);
  219. writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
  220. }
  221. if (spi_stat & (SIRFSOC_SPI_FRM_END
  222. | SIRFSOC_SPI_RXFIFO_THD_REACH))
  223. while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
  224. & SIRFSOC_SPI_FIFO_EMPTY)) &&
  225. sspi->left_rx_word)
  226. sspi->rx_word(sspi);
  227. if (spi_stat & (SIRFSOC_SPI_FIFO_EMPTY
  228. | SIRFSOC_SPI_TXFIFO_THD_REACH))
  229. while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
  230. & SIRFSOC_SPI_FIFO_FULL)) &&
  231. sspi->left_tx_word)
  232. sspi->tx_word(sspi);
  233. /* Received all words */
  234. if ((sspi->left_rx_word == 0) && (sspi->left_tx_word == 0)) {
  235. complete(&sspi->rx_done);
  236. writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
  237. }
  238. return IRQ_HANDLED;
  239. }
  240. static void spi_sirfsoc_dma_fini_callback(void *data)
  241. {
  242. struct completion *dma_complete = data;
  243. complete(dma_complete);
  244. }
/*
 * Execute one spi_transfer: program frame length and auto-clear mode, reset
 * and start both FIFOs, then move the data either by DMA (only for
 * 4-byte-aligned buffers/length, see IS_DMA_VALID) or interrupt-driven PIO,
 * and wait for completion.  Returns the number of bytes transferred.
 */
static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	/*
	 * Passed straight to wait_for_completion_timeout(), so this value is
	 * in jiffies; presumably "10 jiffies per byte" as a generous bound -
	 * TODO confirm the intended units (msecs_to_jiffies is not used).
	 */
	int timeout = t->len * 10;
	sspi = spi_master_get_devdata(spi->master);
	/* fall back to the scratch page when one direction has no buffer */
	sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
	sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
	sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
	INIT_COMPLETION(sspi->rx_done);
	INIT_COMPLETION(sspi->tx_done);
	/* clear any stale interrupt status before arming the transfer */
	writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
	/*
	 * Single-word transfers use auto-clear only; multi-word transfers
	 * below the 64K frame limit additionally use the hardware word
	 * counter (MUL_DAT_MODE) with the DMA_IO_LEN registers holding
	 * (words - 1).
	 */
	if (sspi->left_tx_word == 1) {
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_ENA_AUTO_CLR,
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	} else if ((sspi->left_tx_word > 1) && (sspi->left_tx_word <
			SIRFSOC_SPI_DAT_FRM_LEN_MAX)) {
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_MUL_DAT_MODE |
			SIRFSOC_SPI_ENA_AUTO_CLR,
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(sspi->left_tx_word - 1,
			sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(sspi->left_tx_word - 1,
			sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	} else {
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	}
	/* reset, then start, both FIFOs */
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	if (IS_DMA_VALID(t)) {
		struct dma_async_tx_descriptor *rx_desc, *tx_desc;
		/*
		 * NOTE(review): neither dma_map_single() nor the prep calls
		 * are error-checked here - confirm against the dmaengine API
		 * whether failures need handling on this SoC.
		 */
		sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, DMA_FROM_DEVICE);
		rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
			sspi->dst_start, t->len, DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		rx_desc->callback = spi_sirfsoc_dma_fini_callback;
		rx_desc->callback_param = &sspi->rx_done;
		sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len, DMA_TO_DEVICE);
		tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
			sspi->src_start, t->len, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		tx_desc->callback = spi_sirfsoc_dma_fini_callback;
		tx_desc->callback_param = &sspi->tx_done;
		dmaengine_submit(tx_desc);
		dmaengine_submit(rx_desc);
		dma_async_issue_pending(sspi->tx_chan);
		dma_async_issue_pending(sspi->rx_chan);
	} else {
		/* Send the first word to trigger the whole tx/rx process */
		sspi->tx_word(sspi);
		writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
			SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
			SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
			SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
	}
	/* enabling the transceiver actually kicks off the transfer */
	writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN);
	if (!IS_DMA_VALID(t)) { /* for PIO */
		if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0)
			dev_err(&spi->dev, "transfer timeout\n");
	} else if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->rx_chan);
	} else
		sspi->left_rx_word = 0;
	/*
	 * we only wait tx-done event if transferring by DMA. for PIO,
	 * we get rx data by writing tx data, so if rx is done, tx has
	 * done earlier
	 */
	if (IS_DMA_VALID(t)) {
		if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
			dev_err(&spi->dev, "transfer timeout\n");
			dmaengine_terminate_all(sspi->tx_chan);
		}
	}
	if (IS_DMA_VALID(t)) {
		dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
		dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
	}
	/* TX, RX FIFO stop */
	writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
	writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
	return t->len - sspi->left_rx_word * sspi->word_width;
}
  339. static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
  340. {
  341. struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);
  342. if (sspi->chipselect[spi->chip_select] == 0) {
  343. u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
  344. switch (value) {
  345. case BITBANG_CS_ACTIVE:
  346. if (spi->mode & SPI_CS_HIGH)
  347. regval |= SIRFSOC_SPI_CS_IO_OUT;
  348. else
  349. regval &= ~SIRFSOC_SPI_CS_IO_OUT;
  350. break;
  351. case BITBANG_CS_INACTIVE:
  352. if (spi->mode & SPI_CS_HIGH)
  353. regval &= ~SIRFSOC_SPI_CS_IO_OUT;
  354. else
  355. regval |= SIRFSOC_SPI_CS_IO_OUT;
  356. break;
  357. }
  358. writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
  359. } else {
  360. int gpio = sspi->chipselect[spi->chip_select];
  361. gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
  362. }
  363. }
  364. static int
  365. spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
  366. {
  367. struct sirfsoc_spi *sspi;
  368. u8 bits_per_word = 0;
  369. int hz = 0;
  370. u32 regval;
  371. u32 txfifo_ctrl, rxfifo_ctrl;
  372. u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;
  373. sspi = spi_master_get_devdata(spi->master);
  374. bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
  375. hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
  376. regval = (sspi->ctrl_freq / (2 * hz)) - 1;
  377. if (regval > 0xFFFF || regval < 0) {
  378. dev_err(&spi->dev, "Speed %d not supported\n", hz);
  379. return -EINVAL;
  380. }
  381. switch (bits_per_word) {
  382. case 8:
  383. regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
  384. sspi->rx_word = spi_sirfsoc_rx_word_u8;
  385. sspi->tx_word = spi_sirfsoc_tx_word_u8;
  386. txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
  387. SIRFSOC_SPI_FIFO_WIDTH_BYTE;
  388. rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
  389. SIRFSOC_SPI_FIFO_WIDTH_BYTE;
  390. sspi->word_width = 1;
  391. break;
  392. case 12:
  393. case 16:
  394. regval |= (bits_per_word == 12) ? SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
  395. SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
  396. sspi->rx_word = spi_sirfsoc_rx_word_u16;
  397. sspi->tx_word = spi_sirfsoc_tx_word_u16;
  398. txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
  399. SIRFSOC_SPI_FIFO_WIDTH_WORD;
  400. rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
  401. SIRFSOC_SPI_FIFO_WIDTH_WORD;
  402. sspi->word_width = 2;
  403. break;
  404. case 32:
  405. regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
  406. sspi->rx_word = spi_sirfsoc_rx_word_u32;
  407. sspi->tx_word = spi_sirfsoc_tx_word_u32;
  408. txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
  409. SIRFSOC_SPI_FIFO_WIDTH_DWORD;
  410. rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
  411. SIRFSOC_SPI_FIFO_WIDTH_DWORD;
  412. sspi->word_width = 4;
  413. break;
  414. default:
  415. BUG();
  416. }
  417. if (!(spi->mode & SPI_CS_HIGH))
  418. regval |= SIRFSOC_SPI_CS_IDLE_STAT;
  419. if (!(spi->mode & SPI_LSB_FIRST))
  420. regval |= SIRFSOC_SPI_TRAN_MSB;
  421. if (spi->mode & SPI_CPOL)
  422. regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
  423. /*
  424. * Data should be driven at least 1/2 cycle before the fetch edge to make
  425. * sure that data gets stable at the fetch edge.
  426. */
  427. if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
  428. (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
  429. regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
  430. else
  431. regval |= SIRFSOC_SPI_DRV_POS_EDGE;
  432. writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) |
  433. SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
  434. SIRFSOC_SPI_FIFO_HC(2),
  435. sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK);
  436. writel(SIRFSOC_SPI_FIFO_SC(2) |
  437. SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
  438. SIRFSOC_SPI_FIFO_HC(fifo_size - 2),
  439. sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK);
  440. writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
  441. writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
  442. writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
  443. if (IS_DMA_VALID(t)) {
  444. /* Enable DMA mode for RX, TX */
  445. writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
  446. writel(SIRFSOC_SPI_RX_DMA_FLUSH, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
  447. } else {
  448. /* Enable IO mode for RX, TX */
  449. writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
  450. writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
  451. }
  452. return 0;
  453. }
  454. static int spi_sirfsoc_setup(struct spi_device *spi)
  455. {
  456. struct sirfsoc_spi *sspi;
  457. if (!spi->max_speed_hz)
  458. return -EINVAL;
  459. sspi = spi_master_get_devdata(spi->master);
  460. if (!spi->bits_per_word)
  461. spi->bits_per_word = 8;
  462. return spi_sirfsoc_setup_transfer(spi, NULL);
  463. }
/*
 * Probe: read DT properties (chip-select count, DMA channel ids), allocate
 * the master with trailing space for the chipselect[] GPIO table, request
 * CS GPIOs, map registers, install the IRQ handler, acquire DMA channels
 * and the clock, prime the FIFOs, and register the bitbang master.
 * Error paths unwind in reverse order via the goto ladder at the bottom.
 */
static int spi_sirfsoc_probe(struct platform_device *pdev)
{
	struct sirfsoc_spi *sspi;
	struct spi_master *master;
	struct resource *mem_res;
	int num_cs, cs_gpio, irq;
	u32 rx_dma_ch, tx_dma_ch;
	dma_cap_mask_t dma_cap_mask;
	int i;
	int ret;
	ret = of_property_read_u32(pdev->dev.of_node,
		"sirf,spi-num-chipselects", &num_cs);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to get chip select number\n");
		goto err_cs;
	}
	ret = of_property_read_u32(pdev->dev.of_node,
		"sirf,spi-dma-rx-channel", &rx_dma_ch);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to get rx dma channel\n");
		goto err_cs;
	}
	ret = of_property_read_u32(pdev->dev.of_node,
		"sirf,spi-dma-tx-channel", &tx_dma_ch);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to get tx dma channel\n");
		goto err_cs;
	}
	/* extra space holds the chipselect[] GPIO table */
	master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs);
	if (!master) {
		dev_err(&pdev->dev, "Unable to allocate SPI master\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, master);
	sspi = spi_master_get_devdata(master);
	master->num_chipselect = num_cs;
	for (i = 0; i < master->num_chipselect; i++) {
		cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", i);
		if (cs_gpio < 0) {
			dev_err(&pdev->dev, "can't get cs gpio from DT\n");
			ret = -ENODEV;
			goto free_master;
		}
		sspi->chipselect[i] = cs_gpio;
		if (cs_gpio == 0)
			continue; /* use cs from spi controller */
		ret = gpio_request(cs_gpio, DRIVER_NAME);
		if (ret) {
			/* roll back the GPIOs requested so far */
			while (i > 0) {
				i--;
				if (sspi->chipselect[i] > 0)
					gpio_free(sspi->chipselect[i]);
			}
			dev_err(&pdev->dev, "fail to request cs gpios\n");
			goto free_master;
		}
	}
	/*
	 * NOTE(review): failures after this point jump to free_master
	 * without freeing the CS GPIOs requested above - looks like a leak
	 * on those error paths; confirm and unwind gpio_request if so.
	 */
	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sspi->base)) {
		ret = PTR_ERR(sspi->base);
		goto free_master;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto free_master;
	}
	ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
		DRIVER_NAME, sspi);
	if (ret)
		goto free_master;
	sspi->bitbang.master = spi_master_get(master);
	sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
	sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
	sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
	sspi->bitbang.master->setup = spi_sirfsoc_setup;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
		SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
	sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
	/* request DMA channels */
	dma_cap_zero(dma_cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_cap_mask);
	sspi->rx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
		(void *)rx_dma_ch);
	if (!sspi->rx_chan) {
		dev_err(&pdev->dev, "can not allocate rx dma channel\n");
		ret = -ENODEV;
		goto free_master;
	}
	sspi->tx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
		(void *)tx_dma_ch);
	if (!sspi->tx_chan) {
		dev_err(&pdev->dev, "can not allocate tx dma channel\n");
		ret = -ENODEV;
		goto free_rx_dma;
	}
	sspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sspi->clk)) {
		ret = PTR_ERR(sspi->clk);
		goto free_tx_dma;
	}
	clk_prepare_enable(sspi->clk);
	sspi->ctrl_freq = clk_get_rate(sspi->clk);
	init_completion(&sspi->rx_done);
	init_completion(&sspi->tx_done);
	/* prime both FIFOs: reset, then start */
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	/* We are not using dummy delay between command and data */
	writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
	/* scratch buffer; 2 pages matches the IS_DMA_VALID length limit */
	sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
	if (!sspi->dummypage) {
		ret = -ENOMEM;
		goto free_clk;
	}
	ret = spi_bitbang_start(&sspi->bitbang);
	if (ret)
		goto free_dummypage;
	/* (sic: "registerred" - kept as-is; changing log text is out of scope) */
	dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
	return 0;
free_dummypage:
	kfree(sspi->dummypage);
free_clk:
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
free_tx_dma:
	dma_release_channel(sspi->tx_chan);
free_rx_dma:
	dma_release_channel(sspi->rx_chan);
free_master:
	spi_master_put(master);
err_cs:
	return ret;
}
  602. static int spi_sirfsoc_remove(struct platform_device *pdev)
  603. {
  604. struct spi_master *master;
  605. struct sirfsoc_spi *sspi;
  606. int i;
  607. master = platform_get_drvdata(pdev);
  608. sspi = spi_master_get_devdata(master);
  609. spi_bitbang_stop(&sspi->bitbang);
  610. for (i = 0; i < master->num_chipselect; i++) {
  611. if (sspi->chipselect[i] > 0)
  612. gpio_free(sspi->chipselect[i]);
  613. }
  614. kfree(sspi->dummypage);
  615. clk_disable_unprepare(sspi->clk);
  616. clk_put(sspi->clk);
  617. dma_release_channel(sspi->rx_chan);
  618. dma_release_channel(sspi->tx_chan);
  619. spi_master_put(master);
  620. return 0;
  621. }
#ifdef CONFIG_PM
/* Suspend: gate the controller clock; register state is not saved here. */
static int spi_sirfsoc_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
	clk_disable(sspi->clk);
	return 0;
}

/* Resume: ungate the clock and re-arm both FIFOs (reset, then start). */
static int spi_sirfsoc_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
	clk_enable(sspi->clk);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	return 0;
}

static const struct dev_pm_ops spi_sirfsoc_pm_ops = {
	.suspend = spi_sirfsoc_suspend,
	.resume = spi_sirfsoc_resume,
};
#endif
/* Device-tree compatibles this driver binds to. */
static const struct of_device_id spi_sirfsoc_of_match[] = {
	{ .compatible = "sirf,prima2-spi", },
	{ .compatible = "sirf,marco-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
/* Platform driver glue; PM ops are only wired in when CONFIG_PM is set. */
static struct platform_driver spi_sirfsoc_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &spi_sirfsoc_pm_ops,
#endif
		.of_match_table = spi_sirfsoc_of_match,
	},
	.probe = spi_sirfsoc_probe,
	.remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);

MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>, "
	"Barry Song <Baohua.Song@csr.com>");
MODULE_LICENSE("GPL v2");