spi-omap2-mcspi.c

  1. /*
  2. * OMAP2 McSPI controller driver
  3. *
  4. * Copyright (C) 2005, 2006 Nokia Corporation
  5. * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
  6. * Juha Yrjölä <juha.yrjola@nokia.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21. *
  22. */
  23. #include <linux/kernel.h>
  24. #include <linux/init.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/module.h>
  27. #include <linux/device.h>
  28. #include <linux/delay.h>
  29. #include <linux/dma-mapping.h>
  30. #include <linux/dmaengine.h>
  31. #include <linux/omap-dma.h>
  32. #include <linux/platform_device.h>
  33. #include <linux/err.h>
  34. #include <linux/clk.h>
  35. #include <linux/io.h>
  36. #include <linux/slab.h>
  37. #include <linux/pm_runtime.h>
  38. #include <linux/of.h>
  39. #include <linux/of_device.h>
  40. #include <linux/pinctrl/consumer.h>
  41. #include <linux/err.h>
  42. #include <linux/spi/spi.h>
  43. #include <linux/platform_data/spi-omap2-mcspi.h>
  44. #define OMAP2_MCSPI_MAX_FREQ 48000000
  45. #define SPI_AUTOSUSPEND_TIMEOUT 2000
  46. #define OMAP2_MCSPI_REVISION 0x00
  47. #define OMAP2_MCSPI_SYSSTATUS 0x14
  48. #define OMAP2_MCSPI_IRQSTATUS 0x18
  49. #define OMAP2_MCSPI_IRQENABLE 0x1c
  50. #define OMAP2_MCSPI_WAKEUPENABLE 0x20
  51. #define OMAP2_MCSPI_SYST 0x24
  52. #define OMAP2_MCSPI_MODULCTRL 0x28
  53. /* per-channel banks, 0x14 bytes each, first is: */
  54. #define OMAP2_MCSPI_CHCONF0 0x2c
  55. #define OMAP2_MCSPI_CHSTAT0 0x30
  56. #define OMAP2_MCSPI_CHCTRL0 0x34
  57. #define OMAP2_MCSPI_TX0 0x38
  58. #define OMAP2_MCSPI_RX0 0x3c
  59. /* per-register bitmasks: */
  60. #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
  61. #define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
  62. #define OMAP2_MCSPI_MODULCTRL_STEST BIT(3)
  63. #define OMAP2_MCSPI_CHCONF_PHA BIT(0)
  64. #define OMAP2_MCSPI_CHCONF_POL BIT(1)
  65. #define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2)
  66. #define OMAP2_MCSPI_CHCONF_EPOL BIT(6)
  67. #define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7)
  68. #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY BIT(12)
  69. #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY BIT(13)
  70. #define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12)
  71. #define OMAP2_MCSPI_CHCONF_DMAW BIT(14)
  72. #define OMAP2_MCSPI_CHCONF_DMAR BIT(15)
  73. #define OMAP2_MCSPI_CHCONF_DPE0 BIT(16)
  74. #define OMAP2_MCSPI_CHCONF_DPE1 BIT(17)
  75. #define OMAP2_MCSPI_CHCONF_IS BIT(18)
  76. #define OMAP2_MCSPI_CHCONF_TURBO BIT(19)
  77. #define OMAP2_MCSPI_CHCONF_FORCE BIT(20)
  78. #define OMAP2_MCSPI_CHSTAT_RXS BIT(0)
  79. #define OMAP2_MCSPI_CHSTAT_TXS BIT(1)
  80. #define OMAP2_MCSPI_CHSTAT_EOT BIT(2)
  81. #define OMAP2_MCSPI_CHCTRL_EN BIT(0)
  82. #define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0)
  83. /* We have 2 DMA channels per CS, one for RX and one for TX */
  84. struct omap2_mcspi_dma {
  85. struct dma_chan *dma_tx;
  86. struct dma_chan *dma_rx;
  87. int dma_tx_sync_dev;
  88. int dma_rx_sync_dev;
  89. struct completion dma_tx_completion;
  90. struct completion dma_rx_completion;
  91. };
  92. /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
  93. * cache operations; better heuristics consider wordsize and bitrate.
  94. */
  95. #define DMA_MIN_BYTES 160
  96. /*
  97. * Used for context save and restore; structure members must be updated
  98. * whenever the corresponding registers are modified.
  99. */
  100. struct omap2_mcspi_regs {
  101. u32 modulctrl;
  102. u32 wakeupenable;
  103. struct list_head cs;
  104. };
  105. struct omap2_mcspi {
  106. struct spi_master *master;
  107. /* Virtual base address of the controller */
  108. void __iomem *base;
  109. unsigned long phys;
  110. /* SPI1 has 4 channels, while SPI2 has 2 */
  111. struct omap2_mcspi_dma *dma_channels;
  112. struct device *dev;
  113. struct omap2_mcspi_regs ctx;
  114. unsigned int pin_dir:1;
  115. };
  116. struct omap2_mcspi_cs {
  117. void __iomem *base;
  118. unsigned long phys;
  119. int word_len;
  120. struct list_head node;
  121. /* Context save and restore shadow register */
  122. u32 chconf0;
  123. };
  124. static inline void mcspi_write_reg(struct spi_master *master,
  125. int idx, u32 val)
  126. {
  127. struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
  128. __raw_writel(val, mcspi->base + idx);
  129. }
  130. static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
  131. {
  132. struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
  133. return __raw_readl(mcspi->base + idx);
  134. }
  135. static inline void mcspi_write_cs_reg(const struct spi_device *spi,
  136. int idx, u32 val)
  137. {
  138. struct omap2_mcspi_cs *cs = spi->controller_state;
  139. __raw_writel(val, cs->base + idx);
  140. }
  141. static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
  142. {
  143. struct omap2_mcspi_cs *cs = spi->controller_state;
  144. return __raw_readl(cs->base + idx);
  145. }
  146. static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
  147. {
  148. struct omap2_mcspi_cs *cs = spi->controller_state;
  149. return cs->chconf0;
  150. }
  151. static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
  152. {
  153. struct omap2_mcspi_cs *cs = spi->controller_state;
  154. cs->chconf0 = val;
  155. mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
  156. mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
  157. }
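/*
 * Enable or disable the DMA read (DMAR) or write (DMAW) request line for
 * this channel via the cached CHCONF0 value.
 */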
  158. static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
  159. int is_read, int enable)
  160. {
  161. u32 l, rw;
  162. l = mcspi_cached_chconf0(spi);
  163. if (is_read) /* 1 is read, 0 write */
  164. rw = OMAP2_MCSPI_CHCONF_DMAR;
  165. else
  166. rw = OMAP2_MCSPI_CHCONF_DMAW;
  167. if (enable)
  168. l |= rw;
  169. else
  170. l &= ~rw;
  171. mcspi_write_chconf0(spi, l);
  172. }
  173. static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
  174. {
  175. u32 l;
  176. l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
  177. mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
  178. /* Flush posted writes */
  179. mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
  180. }
  181. static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
  182. {
  183. u32 l;
  184. l = mcspi_cached_chconf0(spi);
  185. if (cs_active)
  186. l |= OMAP2_MCSPI_CHCONF_FORCE;
  187. else
  188. l &= ~OMAP2_MCSPI_CHCONF_FORCE;
  189. mcspi_write_chconf0(spi, l);
  190. }
  191. static void omap2_mcspi_set_master_mode(struct spi_master *master)
  192. {
  193. struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
  194. struct omap2_mcspi_regs *ctx = &mcspi->ctx;
  195. u32 l;
  196. /*
  197. * Setup when switching from (reset default) slave mode
  198. * to single-channel master mode
  199. */
  200. l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
  201. l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
  202. l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
  203. mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
  204. ctx->modulctrl = l;
  205. }
  206. static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
  207. {
  208. struct spi_master *spi_cntrl = mcspi->master;
  209. struct omap2_mcspi_regs *ctx = &mcspi->ctx;
  210. struct omap2_mcspi_cs *cs;
  211. /* McSPI: context restore */
  212. mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
  213. mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
  214. list_for_each_entry(cs, &ctx->cs, node)
  215. __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
  216. }
  217. static int omap2_prepare_transfer(struct spi_master *master)
  218. {
  219. struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
  220. pm_runtime_get_sync(mcspi->dev);
  221. return 0;
  222. }
  223. static int omap2_unprepare_transfer(struct spi_master *master)
  224. {
  225. struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
  226. pm_runtime_mark_last_busy(mcspi->dev);
  227. pm_runtime_put_autosuspend(mcspi->dev);
  228. return 0;
  229. }
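/*
 * Poll a channel status register until the given bit is set; gives up
 * after one second and returns -1 on timeout, 0 on success.
 */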
  230. static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
  231. {
  232. unsigned long timeout;
  233. timeout = jiffies + msecs_to_jiffies(1000);
  234. while (!(__raw_readl(reg) & bit)) {
  235. if (time_after(jiffies, timeout))
  236. return -1;
  237. cpu_relax();
  238. }
  239. return 0;
  240. }
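/*
 * DMA-engine completion callbacks: wake the waiting transfer and drop the
 * corresponding DMA request line so the controller stops pacing it.
 */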
  241. static void omap2_mcspi_rx_callback(void *data)
  242. {
  243. struct spi_device *spi = data;
  244. struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
  245. struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
  246. complete(&mcspi_dma->dma_rx_completion);
  247. /* We must disable the DMA RX request */
  248. omap2_mcspi_set_dma_req(spi, 1, 0);
  249. }
  250. static void omap2_mcspi_tx_callback(void *data)
  251. {
  252. struct spi_device *spi = data;
  253. struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
  254. struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
  255. complete(&mcspi_dma->dma_tx_completion);
  256. /* We must disable the DMA TX request */
  257. omap2_mcspi_set_dma_req(spi, 0, 0);
  258. }
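/*
 * Run the TX half of a DMA transfer: configure the slave channel, wrap the
 * already-mapped buffer in a one-entry scatterlist, submit it, raise the
 * DMAW request and wait for completion. For TX-only transfers, also wait
 * for TXS/EOT so the last word has fully shifted out.
 */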
  259. static void omap2_mcspi_tx_dma(struct spi_device *spi,
  260. struct spi_transfer *xfer,
  261. struct dma_slave_config cfg)
  262. {
  263. struct omap2_mcspi *mcspi;
  264. struct omap2_mcspi_dma *mcspi_dma;
  265. unsigned int count;
  266. u8 * rx;
  267. const u8 * tx;
  268. void __iomem *chstat_reg;
  269. struct omap2_mcspi_cs *cs = spi->controller_state;
  270. mcspi = spi_master_get_devdata(spi->master);
  271. mcspi_dma = &mcspi->dma_channels[spi->chip_select];
  272. count = xfer->len;
  273. rx = xfer->rx_buf;
  274. tx = xfer->tx_buf;
  275. chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
  276. if (mcspi_dma->dma_tx) {
  277. struct dma_async_tx_descriptor *tx;
  278. struct scatterlist sg;
  279. dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
  280. sg_init_table(&sg, 1);
  281. sg_dma_address(&sg) = xfer->tx_dma;
  282. sg_dma_len(&sg) = xfer->len;
  283. tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
  284. DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  285. if (tx) {
  286. tx->callback = omap2_mcspi_tx_callback;
  287. tx->callback_param = spi;
  288. dmaengine_submit(tx);
  289. } else {
  290. /* FIXME: fall back to PIO? */
  291. }
  292. }
  293. dma_async_issue_pending(mcspi_dma->dma_tx);
  294. omap2_mcspi_set_dma_req(spi, 0, 1);
  295. wait_for_completion(&mcspi_dma->dma_tx_completion);
  296. dma_unmap_single(mcspi->dev, xfer->tx_dma, count,
  297. DMA_TO_DEVICE);
  298. /* for TX_ONLY mode, be sure all words have shifted out */
  299. if (rx == NULL) {
  300. if (mcspi_wait_for_reg_bit(chstat_reg,
  301. OMAP2_MCSPI_CHSTAT_TXS) < 0)
  302. dev_err(&spi->dev, "TXS timed out\n");
  303. else if (mcspi_wait_for_reg_bit(chstat_reg,
  304. OMAP2_MCSPI_CHSTAT_EOT) < 0)
  305. dev_err(&spi->dev, "EOT timed out\n");
  306. }
  307. }
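/*
 * Run the RX half of a DMA transfer. The final word (and, in TURBO mode,
 * the penultimate word) is left for PIO: the channel is disabled and the
 * remaining data is drained from RX0 by hand. Returns the number of bytes
 * actually received.
 */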
  308. static unsigned
  309. omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
  310. struct dma_slave_config cfg,
  311. unsigned es)
  312. {
  313. struct omap2_mcspi *mcspi;
  314. struct omap2_mcspi_dma *mcspi_dma;
  315. unsigned int count;
  316. u32 l;
  317. int elements = 0;
  318. int word_len, element_count;
  319. struct omap2_mcspi_cs *cs = spi->controller_state;
  320. mcspi = spi_master_get_devdata(spi->master);
  321. mcspi_dma = &mcspi->dma_channels[spi->chip_select];
  322. count = xfer->len;
  323. word_len = cs->word_len;
  324. l = mcspi_cached_chconf0(spi);
  325. if (word_len <= 8)
  326. element_count = count;
  327. else if (word_len <= 16)
  328. element_count = count >> 1;
  329. else /* word_len <= 32 */
  330. element_count = count >> 2;
  331. if (mcspi_dma->dma_rx) {
  332. struct dma_async_tx_descriptor *tx;
  333. struct scatterlist sg;
  334. size_t len = xfer->len - es;
  335. dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
  336. if (l & OMAP2_MCSPI_CHCONF_TURBO)
  337. len -= es;
  338. sg_init_table(&sg, 1);
  339. sg_dma_address(&sg) = xfer->rx_dma;
  340. sg_dma_len(&sg) = len;
  341. tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
  342. DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
  343. DMA_CTRL_ACK);
  344. if (tx) {
  345. tx->callback = omap2_mcspi_rx_callback;
  346. tx->callback_param = spi;
  347. dmaengine_submit(tx);
  348. } else {
  349. /* FIXME: fall back to PIO? */
  350. }
  351. }
  352. dma_async_issue_pending(mcspi_dma->dma_rx);
  353. omap2_mcspi_set_dma_req(spi, 1, 1);
  354. wait_for_completion(&mcspi_dma->dma_rx_completion);
  355. dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
  356. DMA_FROM_DEVICE);
  357. omap2_mcspi_set_enable(spi, 0);
  358. elements = element_count - 1;
  359. if (l & OMAP2_MCSPI_CHCONF_TURBO) {
  360. elements--;
  361. if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
  362. & OMAP2_MCSPI_CHSTAT_RXS)) {
  363. u32 w;
  364. w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
  365. if (word_len <= 8)
  366. ((u8 *)xfer->rx_buf)[elements++] = w;
  367. else if (word_len <= 16)
  368. ((u16 *)xfer->rx_buf)[elements++] = w;
  369. else /* word_len <= 32 */
  370. ((u32 *)xfer->rx_buf)[elements++] = w;
  371. } else {
  372. dev_err(&spi->dev, "DMA RX penultimate word empty");
  373. count -= (word_len <= 8) ? 2 :
  374. (word_len <= 16) ? 4 :
  375. /* word_len <= 32 */ 8;
  376. omap2_mcspi_set_enable(spi, 1);
  377. return count;
  378. }
  379. }
  380. if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
  381. & OMAP2_MCSPI_CHSTAT_RXS)) {
  382. u32 w;
  383. w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
  384. if (word_len <= 8)
  385. ((u8 *)xfer->rx_buf)[elements] = w;
  386. else if (word_len <= 16)
  387. ((u16 *)xfer->rx_buf)[elements] = w;
  388. else /* word_len <= 32 */
  389. ((u32 *)xfer->rx_buf)[elements] = w;
  390. } else {
  391. dev_err(&spi->dev, "DMA RX last word empty");
  392. count -= (word_len <= 8) ? 1 :
  393. (word_len <= 16) ? 2 :
  394. /* word_len <= 32 */ 4;
  395. }
  396. omap2_mcspi_set_enable(spi, 1);
  397. return count;
  398. }
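/*
 * Full-duplex DMA path: derive the dmaengine bus width and element size
 * from the channel word length, point the slave config at this channel's
 * TX0/RX0 data register addresses, then run the TX and/or RX halves.
 */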
  399. static unsigned
  400. omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
  401. {
  402. struct omap2_mcspi *mcspi;
  403. struct omap2_mcspi_cs *cs = spi->controller_state;
  404. struct omap2_mcspi_dma *mcspi_dma;
  405. unsigned int count;
  406. u32 l;
  407. u8 *rx;
  408. const u8 *tx;
  409. struct dma_slave_config cfg;
  410. enum dma_slave_buswidth width;
  411. unsigned es;
  412. mcspi = spi_master_get_devdata(spi->master);
  413. mcspi_dma = &mcspi->dma_channels[spi->chip_select];
  414. l = mcspi_cached_chconf0(spi);
  415. if (cs->word_len <= 8) {
  416. width = DMA_SLAVE_BUSWIDTH_1_BYTE;
  417. es = 1;
  418. } else if (cs->word_len <= 16) {
  419. width = DMA_SLAVE_BUSWIDTH_2_BYTES;
  420. es = 2;
  421. } else {
  422. width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  423. es = 4;
  424. }
  425. memset(&cfg, 0, sizeof(cfg));
  426. cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
  427. cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
  428. cfg.src_addr_width = width;
  429. cfg.dst_addr_width = width;
  430. cfg.src_maxburst = 1;
  431. cfg.dst_maxburst = 1;
  432. rx = xfer->rx_buf;
  433. tx = xfer->tx_buf;
  434. count = xfer->len;
  435. if (tx != NULL)
  436. omap2_mcspi_tx_dma(spi, xfer, cfg);
  437. if (rx != NULL)
  438. return omap2_mcspi_rx_dma(spi, xfer, cfg, es);
  439. return count;
  440. }
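/*
 * PIO transfer loop: for every word, wait for TXS before writing TX0 and
 * for RXS before reading RX0, with special handling for the TURBO-mode
 * tail and for draining TX-only transfers. Returns bytes transferred.
 */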
  441. static unsigned
  442. omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
  443. {
  444. struct omap2_mcspi *mcspi;
  445. struct omap2_mcspi_cs *cs = spi->controller_state;
  446. unsigned int count, c;
  447. u32 l;
  448. void __iomem *base = cs->base;
  449. void __iomem *tx_reg;
  450. void __iomem *rx_reg;
  451. void __iomem *chstat_reg;
  452. int word_len;
  453. mcspi = spi_master_get_devdata(spi->master);
  454. count = xfer->len;
  455. c = count;
  456. word_len = cs->word_len;
  457. l = mcspi_cached_chconf0(spi);
  458. /* We store the pre-calculated register addresses on stack to speed
  459. * up the transfer loop. */
  460. tx_reg = base + OMAP2_MCSPI_TX0;
  461. rx_reg = base + OMAP2_MCSPI_RX0;
  462. chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
  463. if (c < (word_len>>3))
  464. return 0;
  465. if (word_len <= 8) {
  466. u8 *rx;
  467. const u8 *tx;
  468. rx = xfer->rx_buf;
  469. tx = xfer->tx_buf;
  470. do {
  471. c -= 1;
  472. if (tx != NULL) {
  473. if (mcspi_wait_for_reg_bit(chstat_reg,
  474. OMAP2_MCSPI_CHSTAT_TXS) < 0) {
  475. dev_err(&spi->dev, "TXS timed out\n");
  476. goto out;
  477. }
  478. dev_vdbg(&spi->dev, "write-%d %02x\n",
  479. word_len, *tx);
  480. __raw_writel(*tx++, tx_reg);
  481. }
  482. if (rx != NULL) {
  483. if (mcspi_wait_for_reg_bit(chstat_reg,
  484. OMAP2_MCSPI_CHSTAT_RXS) < 0) {
  485. dev_err(&spi->dev, "RXS timed out\n");
  486. goto out;
  487. }
  488. if (c == 1 && tx == NULL &&
  489. (l & OMAP2_MCSPI_CHCONF_TURBO)) {
  490. omap2_mcspi_set_enable(spi, 0);
  491. *rx++ = __raw_readl(rx_reg);
  492. dev_vdbg(&spi->dev, "read-%d %02x\n",
  493. word_len, *(rx - 1));
  494. if (mcspi_wait_for_reg_bit(chstat_reg,
  495. OMAP2_MCSPI_CHSTAT_RXS) < 0) {
  496. dev_err(&spi->dev,
  497. "RXS timed out\n");
  498. goto out;
  499. }
  500. c = 0;
  501. } else if (c == 0 && tx == NULL) {
  502. omap2_mcspi_set_enable(spi, 0);
  503. }
  504. *rx++ = __raw_readl(rx_reg);
  505. dev_vdbg(&spi->dev, "read-%d %02x\n",
  506. word_len, *(rx - 1));
  507. }
  508. } while (c);
  509. } else if (word_len <= 16) {
  510. u16 *rx;
  511. const u16 *tx;
  512. rx = xfer->rx_buf;
  513. tx = xfer->tx_buf;
  514. do {
  515. c -= 2;
  516. if (tx != NULL) {
  517. if (mcspi_wait_for_reg_bit(chstat_reg,
  518. OMAP2_MCSPI_CHSTAT_TXS) < 0) {
  519. dev_err(&spi->dev, "TXS timed out\n");
  520. goto out;
  521. }
  522. dev_vdbg(&spi->dev, "write-%d %04x\n",
  523. word_len, *tx);
  524. __raw_writel(*tx++, tx_reg);
  525. }
  526. if (rx != NULL) {
  527. if (mcspi_wait_for_reg_bit(chstat_reg,
  528. OMAP2_MCSPI_CHSTAT_RXS) < 0) {
  529. dev_err(&spi->dev, "RXS timed out\n");
  530. goto out;
  531. }
  532. if (c == 2 && tx == NULL &&
  533. (l & OMAP2_MCSPI_CHCONF_TURBO)) {
  534. omap2_mcspi_set_enable(spi, 0);
  535. *rx++ = __raw_readl(rx_reg);
  536. dev_vdbg(&spi->dev, "read-%d %04x\n",
  537. word_len, *(rx - 1));
  538. if (mcspi_wait_for_reg_bit(chstat_reg,
  539. OMAP2_MCSPI_CHSTAT_RXS) < 0) {
  540. dev_err(&spi->dev,
  541. "RXS timed out\n");
  542. goto out;
  543. }
  544. c = 0;
  545. } else if (c == 0 && tx == NULL) {
  546. omap2_mcspi_set_enable(spi, 0);
  547. }
  548. *rx++ = __raw_readl(rx_reg);
  549. dev_vdbg(&spi->dev, "read-%d %04x\n",
  550. word_len, *(rx - 1));
  551. }
  552. } while (c >= 2);
  553. } else if (word_len <= 32) {
  554. u32 *rx;
  555. const u32 *tx;
  556. rx = xfer->rx_buf;
  557. tx = xfer->tx_buf;
  558. do {
  559. c -= 4;
  560. if (tx != NULL) {
  561. if (mcspi_wait_for_reg_bit(chstat_reg,
  562. OMAP2_MCSPI_CHSTAT_TXS) < 0) {
  563. dev_err(&spi->dev, "TXS timed out\n");
  564. goto out;
  565. }
  566. dev_vdbg(&spi->dev, "write-%d %08x\n",
  567. word_len, *tx);
  568. __raw_writel(*tx++, tx_reg);
  569. }
  570. if (rx != NULL) {
  571. if (mcspi_wait_for_reg_bit(chstat_reg,
  572. OMAP2_MCSPI_CHSTAT_RXS) < 0) {
  573. dev_err(&spi->dev, "RXS timed out\n");
  574. goto out;
  575. }
  576. if (c == 4 && tx == NULL &&
  577. (l & OMAP2_MCSPI_CHCONF_TURBO)) {
  578. omap2_mcspi_set_enable(spi, 0);
  579. *rx++ = __raw_readl(rx_reg);
  580. dev_vdbg(&spi->dev, "read-%d %08x\n",
  581. word_len, *(rx - 1));
  582. if (mcspi_wait_for_reg_bit(chstat_reg,
  583. OMAP2_MCSPI_CHSTAT_RXS) < 0) {
  584. dev_err(&spi->dev,
  585. "RXS timed out\n");
  586. goto out;
  587. }
  588. c = 0;
  589. } else if (c == 0 && tx == NULL) {
  590. omap2_mcspi_set_enable(spi, 0);
  591. }
  592. *rx++ = __raw_readl(rx_reg);
  593. dev_vdbg(&spi->dev, "read-%d %08x\n",
  594. word_len, *(rx - 1));
  595. }
  596. } while (c >= 4);
  597. }
  598. /* for TX_ONLY mode, be sure all words have shifted out */
  599. if (xfer->rx_buf == NULL) {
  600. if (mcspi_wait_for_reg_bit(chstat_reg,
  601. OMAP2_MCSPI_CHSTAT_TXS) < 0) {
  602. dev_err(&spi->dev, "TXS timed out\n");
  603. } else if (mcspi_wait_for_reg_bit(chstat_reg,
  604. OMAP2_MCSPI_CHSTAT_EOT) < 0)
  605. dev_err(&spi->dev, "EOT timed out\n");
  606. /* disable the channel to purge RX data received during a TX_ONLY
  607. * transfer; otherwise that stale RX data would corrupt the directly
  608. * following RX_ONLY transfer.
  609. */
  610. omap2_mcspi_set_enable(spi, 0);
  611. }
  612. out:
  613. omap2_mcspi_set_enable(spi, 1);
  614. return count - c;
  615. }
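/*
 * Return the smallest divider exponent (0..15) such that the 48 MHz
 * functional clock divided by 2^div does not exceed the requested speed.
 */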
  616. static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
  617. {
  618. u32 div;
  619. for (div = 0; div < 15; div++)
  620. if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
  621. return div;
  622. return 15;
  623. }
  624. /* called only when no transfer is active on this device */
  625. static int omap2_mcspi_setup_transfer(struct spi_device *spi,
  626. struct spi_transfer *t)
  627. {
  628. struct omap2_mcspi_cs *cs = spi->controller_state;
  629. struct omap2_mcspi *mcspi;
  630. struct spi_master *spi_cntrl;
  631. u32 l = 0, div = 0;
  632. u8 word_len = spi->bits_per_word;
  633. u32 speed_hz = spi->max_speed_hz;
  634. mcspi = spi_master_get_devdata(spi->master);
  635. spi_cntrl = mcspi->master;
  636. if (t != NULL && t->bits_per_word)
  637. word_len = t->bits_per_word;
  638. cs->word_len = word_len;
  639. if (t && t->speed_hz)
  640. speed_hz = t->speed_hz;
  641. speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
  642. div = omap2_mcspi_calc_divisor(speed_hz);
  643. l = mcspi_cached_chconf0(spi);
  644. /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
  645. * REVISIT: this controller could support SPI_3WIRE mode.
  646. */
  647. if (mcspi->pin_dir == MCSPI_PINDIR_D0_OUT_D1_IN) {
  648. l &= ~OMAP2_MCSPI_CHCONF_IS;
  649. l &= ~OMAP2_MCSPI_CHCONF_DPE1;
  650. l |= OMAP2_MCSPI_CHCONF_DPE0;
  651. } else {
  652. l |= OMAP2_MCSPI_CHCONF_IS;
  653. l |= OMAP2_MCSPI_CHCONF_DPE1;
  654. l &= ~OMAP2_MCSPI_CHCONF_DPE0;
  655. }
  656. /* wordlength */
  657. l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
  658. l |= (word_len - 1) << 7;
  659. /* set chipselect polarity; manage with FORCE */
  660. if (!(spi->mode & SPI_CS_HIGH))
  661. l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */
  662. else
  663. l &= ~OMAP2_MCSPI_CHCONF_EPOL;
  664. /* set clock divisor */
  665. l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
  666. l |= div << 2;
  667. /* set SPI mode 0..3 */
  668. if (spi->mode & SPI_CPOL)
  669. l |= OMAP2_MCSPI_CHCONF_POL;
  670. else
  671. l &= ~OMAP2_MCSPI_CHCONF_POL;
  672. if (spi->mode & SPI_CPHA)
  673. l |= OMAP2_MCSPI_CHCONF_PHA;
  674. else
  675. l &= ~OMAP2_MCSPI_CHCONF_PHA;
  676. mcspi_write_chconf0(spi, l);
  677. dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
  678. OMAP2_MCSPI_MAX_FREQ >> div,
  679. (spi->mode & SPI_CPHA) ? "trailing" : "leading",
  680. (spi->mode & SPI_CPOL) ? "inverted" : "normal");
  681. return 0;
  682. }
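/*
 * Request one RX and one TX dmaengine channel for this chip select, using
 * the platform-provided DMA request numbers as the filter parameter.
 */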
  683. static int omap2_mcspi_request_dma(struct spi_device *spi)
  684. {
  685. struct spi_master *master = spi->master;
  686. struct omap2_mcspi *mcspi;
  687. struct omap2_mcspi_dma *mcspi_dma;
  688. dma_cap_mask_t mask;
  689. unsigned sig;
  690. mcspi = spi_master_get_devdata(master);
  691. mcspi_dma = mcspi->dma_channels + spi->chip_select;
  692. init_completion(&mcspi_dma->dma_rx_completion);
  693. init_completion(&mcspi_dma->dma_tx_completion);
  694. dma_cap_zero(mask);
  695. dma_cap_set(DMA_SLAVE, mask);
  696. sig = mcspi_dma->dma_rx_sync_dev;
  697. mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
  698. if (!mcspi_dma->dma_rx) {
  699. dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
  700. return -EAGAIN;
  701. }
  702. sig = mcspi_dma->dma_tx_sync_dev;
  703. mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
  704. if (!mcspi_dma->dma_tx) {
  705. dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
  706. dma_release_channel(mcspi_dma->dma_rx);
  707. mcspi_dma->dma_rx = NULL;
  708. return -EAGAIN;
  709. }
  710. return 0;
  711. }
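/*
 * Per-device setup: validate the word length, allocate the per-chip-select
 * state on first use (register bank at base + chip_select * 0x14) and link
 * it into the context-save list, grab DMA channels if missing, then program
 * CHCONF0 under runtime PM.
 */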
  712. static int omap2_mcspi_setup(struct spi_device *spi)
  713. {
  714. int ret;
  715. struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
  716. struct omap2_mcspi_regs *ctx = &mcspi->ctx;
  717. struct omap2_mcspi_dma *mcspi_dma;
  718. struct omap2_mcspi_cs *cs = spi->controller_state;
  719. if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
  720. dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
  721. spi->bits_per_word);
  722. return -EINVAL;
  723. }
  724. mcspi_dma = &mcspi->dma_channels[spi->chip_select];
  725. if (!cs) {
  726. cs = kzalloc(sizeof *cs, GFP_KERNEL);
  727. if (!cs)
  728. return -ENOMEM;
  729. cs->base = mcspi->base + spi->chip_select * 0x14;
  730. cs->phys = mcspi->phys + spi->chip_select * 0x14;
  731. cs->chconf0 = 0;
  732. spi->controller_state = cs;
  733. /* Link this to context save list */
  734. list_add_tail(&cs->node, &ctx->cs);
  735. }
  736. if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
  737. ret = omap2_mcspi_request_dma(spi);
  738. if (ret < 0)
  739. return ret;
  740. }
  741. ret = pm_runtime_get_sync(mcspi->dev);
  742. if (ret < 0)
  743. return ret;
  744. ret = omap2_mcspi_setup_transfer(spi, NULL);
  745. pm_runtime_mark_last_busy(mcspi->dev);
  746. pm_runtime_put_autosuspend(mcspi->dev);
  747. return ret;
  748. }
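/* Undo omap2_mcspi_setup(): free the per-CS state and both DMA channels. */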
  749. static void omap2_mcspi_cleanup(struct spi_device *spi)
  750. {
  751. struct omap2_mcspi *mcspi;
  752. struct omap2_mcspi_dma *mcspi_dma;
  753. struct omap2_mcspi_cs *cs;
  754. mcspi = spi_master_get_devdata(spi->master);
  755. if (spi->controller_state) {
  756. /* Unlink controller state from context save list */
  757. cs = spi->controller_state;
  758. list_del(&cs->node);
  759. kfree(cs);
  760. }
  761. if (spi->chip_select < spi->master->num_chipselect) {
  762. mcspi_dma = &mcspi->dma_channels[spi->chip_select];
  763. if (mcspi_dma->dma_rx) {
  764. dma_release_channel(mcspi_dma->dma_rx);
  765. mcspi_dma->dma_rx = NULL;
  766. }
  767. if (mcspi_dma->dma_tx) {
  768. dma_release_channel(mcspi_dma->dma_tx);
  769. mcspi_dma->dma_tx = NULL;
  770. }
  771. }
  772. }
  773. static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
  774. {
  775. /* We only enable one channel at a time -- the one whose message is
  776. * currently being processed -- although this controller would gladly
  777. * arbitrate among multiple channels. This corresponds to "single
  778. * channel" master mode. As a side effect, we need to manage the
  779. * chipselect with the FORCE bit ... CS != channel enable.
  780. */
  781. struct spi_device *spi;
  782. struct spi_transfer *t = NULL;
  783. int cs_active = 0;
  784. struct omap2_mcspi_cs *cs;
  785. struct omap2_mcspi_device_config *cd;
  786. int par_override = 0;
  787. int status = 0;
  788. u32 chconf;
  789. spi = m->spi;
  790. cs = spi->controller_state;
  791. cd = spi->controller_data;
  792. omap2_mcspi_set_enable(spi, 1);
  793. list_for_each_entry(t, &m->transfers, transfer_list) {
  794. if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
  795. status = -EINVAL;
  796. break;
  797. }
  798. if (par_override || t->speed_hz || t->bits_per_word) {
  799. par_override = 1;
  800. status = omap2_mcspi_setup_transfer(spi, t);
  801. if (status < 0)
  802. break;
  803. if (!t->speed_hz && !t->bits_per_word)
  804. par_override = 0;
  805. }
  806. if (!cs_active) {
  807. omap2_mcspi_force_cs(spi, 1);
  808. cs_active = 1;
  809. }
  810. chconf = mcspi_cached_chconf0(spi);
  811. chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
  812. chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
  813. if (t->tx_buf == NULL)
  814. chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
  815. else if (t->rx_buf == NULL)
  816. chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
  817. if (cd && cd->turbo_mode && t->tx_buf == NULL) {
  818. /* Turbo mode is for more than one word */
  819. if (t->len > ((cs->word_len + 7) >> 3))
  820. chconf |= OMAP2_MCSPI_CHCONF_TURBO;
  821. }
  822. mcspi_write_chconf0(spi, chconf);
  823. if (t->len) {
  824. unsigned count;
  825. /* RX_ONLY mode needs dummy data in TX reg */
  826. if (t->tx_buf == NULL)
  827. __raw_writel(0, cs->base
  828. + OMAP2_MCSPI_TX0);
  829. if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
  830. count = omap2_mcspi_txrx_dma(spi, t);
  831. else
  832. count = omap2_mcspi_txrx_pio(spi, t);
  833. m->actual_length += count;
  834. if (count != t->len) {
  835. status = -EIO;
  836. break;
  837. }
  838. }
  839. if (t->delay_usecs)
  840. udelay(t->delay_usecs);
  841. /* ignore the "leave it on after last xfer" hint */
  842. if (t->cs_change) {
  843. omap2_mcspi_force_cs(spi, 0);
  844. cs_active = 0;
  845. }
  846. }
  847. /* Restore defaults if they were overridden */
  848. if (par_override) {
  849. par_override = 0;
  850. status = omap2_mcspi_setup_transfer(spi, NULL);
  851. }
  852. if (cs_active)
  853. omap2_mcspi_force_cs(spi, 0);
  854. omap2_mcspi_set_enable(spi, 0);
  855. m->status = status;
  856. }
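/*
 * spi_master transfer_one_message() hook: sanity-check every transfer in
 * the message, DMA-map buffers for transfers of DMA_MIN_BYTES or more,
 * then hand the message to omap2_mcspi_work() and finalize it.
 */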
  857. static int omap2_mcspi_transfer_one_message(struct spi_master *master,
  858. struct spi_message *m)
  859. {
  860. struct omap2_mcspi *mcspi;
  861. struct spi_transfer *t;
  862. mcspi = spi_master_get_devdata(master);
  863. m->actual_length = 0;
  864. m->status = 0;
  865. /* reject invalid messages and transfers */
  866. if (list_empty(&m->transfers))
  867. return -EINVAL;
  868. list_for_each_entry(t, &m->transfers, transfer_list) {
  869. const void *tx_buf = t->tx_buf;
  870. void *rx_buf = t->rx_buf;
  871. unsigned len = t->len;
  872. if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
  873. || (len && !(rx_buf || tx_buf))
  874. || (t->bits_per_word &&
  875. ( t->bits_per_word < 4
  876. || t->bits_per_word > 32))) {
  877. dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
  878. t->speed_hz,
  879. len,
  880. tx_buf ? "tx" : "",
  881. rx_buf ? "rx" : "",
  882. t->bits_per_word);
  883. return -EINVAL;
  884. }
  885. if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
  886. dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
  887. t->speed_hz,
  888. OMAP2_MCSPI_MAX_FREQ >> 15);
  889. return -EINVAL;
  890. }
  891. if (m->is_dma_mapped || len < DMA_MIN_BYTES)
  892. continue;
  893. if (tx_buf != NULL) {
  894. t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
  895. len, DMA_TO_DEVICE);
  896. if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
  897. dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
  898. 'T', len);
  899. return -EINVAL;
  900. }
  901. }
  902. if (rx_buf != NULL) {
  903. t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
  904. DMA_FROM_DEVICE);
  905. if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
  906. dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
  907. 'R', len);
  908. if (tx_buf != NULL)
  909. dma_unmap_single(mcspi->dev, t->tx_dma,
  910. len, DMA_TO_DEVICE);
  911. return -EINVAL;
  912. }
  913. }
  914. }
  915. omap2_mcspi_work(mcspi, m);
  916. spi_finalize_current_message(master);
  917. return 0;
  918. }
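/*
 * One-time controller init: enable the wakeup event and switch the module
 * from its reset-default slave mode to single-channel master mode, caching
 * both settings for later context restore.
 */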
  919. static int __devinit omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
  920. {
  921. struct spi_master *master = mcspi->master;
  922. struct omap2_mcspi_regs *ctx = &mcspi->ctx;
  923. int ret = 0;
  924. ret = pm_runtime_get_sync(mcspi->dev);
  925. if (ret < 0)
  926. return ret;
  927. mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
  928. OMAP2_MCSPI_WAKEUPENABLE_WKEN);
  929. ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
  930. omap2_mcspi_set_master_mode(master);
  931. pm_runtime_mark_last_busy(mcspi->dev);
  932. pm_runtime_put_autosuspend(mcspi->dev);
  933. return 0;
  934. }
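/*
 * Runtime-resume hook: register context (MODULCTRL, WAKEUPENABLE and each
 * channel's CHCONF0) may have been lost while the module was idle, so
 * restore it from the software shadow copies.
 */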
  935. static int omap_mcspi_runtime_resume(struct device *dev)
  936. {
  937. struct omap2_mcspi *mcspi;
  938. struct spi_master *master;
  939. master = dev_get_drvdata(dev);
  940. mcspi = spi_master_get_devdata(master);
  941. omap2_mcspi_restore_ctx(mcspi);
  942. return 0;
  943. }
  944. static struct omap2_mcspi_platform_config omap2_pdata = {
  945. .regs_offset = 0,
  946. };
  947. static struct omap2_mcspi_platform_config omap4_pdata = {
  948. .regs_offset = OMAP4_MCSPI_REG_OFFSET,
  949. };
  950. static const struct of_device_id omap_mcspi_of_match[] = {
  951. {
  952. .compatible = "ti,omap2-mcspi",
  953. .data = &omap2_pdata,
  954. },
  955. {
  956. .compatible = "ti,omap4-mcspi",
  957. .data = &omap4_pdata,
  958. },
  959. { },
  960. };
  961. MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
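/*
 * Probe: allocate the SPI master, take the chip-select count, bus number,
 * pin direction and register offset from device tree or platform data, map
 * the registers, record the per-CS DMA request lines, enable runtime PM
 * with autosuspend and register the master.
 *
 * Illustrative (hypothetical) minimal device-tree node using the
 * properties parsed below:
 *
 *	spi {
 *		compatible = "ti,omap4-mcspi";
 *		ti,spi-num-cs = <4>;
 *		ti,pindir-d0-in-d1-out;
 *	};
 */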
  962. static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
  963. {
  964. struct spi_master *master;
  965. const struct omap2_mcspi_platform_config *pdata;
  966. struct omap2_mcspi *mcspi;
  967. struct resource *r;
  968. int status = 0, i;
  969. u32 regs_offset = 0;
  970. static int bus_num = 1;
  971. struct device_node *node = pdev->dev.of_node;
  972. const struct of_device_id *match;
  973. struct pinctrl *pinctrl;
  974. master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
  975. if (master == NULL) {
  976. dev_dbg(&pdev->dev, "master allocation failed\n");
  977. return -ENOMEM;
  978. }
  979. /* the spi->mode bits understood by this driver: */
  980. master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
  981. master->setup = omap2_mcspi_setup;
  982. master->prepare_transfer_hardware = omap2_prepare_transfer;
  983. master->unprepare_transfer_hardware = omap2_unprepare_transfer;
  984. master->transfer_one_message = omap2_mcspi_transfer_one_message;
  985. master->cleanup = omap2_mcspi_cleanup;
  986. master->dev.of_node = node;
  987. dev_set_drvdata(&pdev->dev, master);
  988. mcspi = spi_master_get_devdata(master);
  989. mcspi->master = master;
  990. match = of_match_device(omap_mcspi_of_match, &pdev->dev);
  991. if (match) {
  992. u32 num_cs = 1; /* default number of chipselect */
  993. pdata = match->data;
  994. of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
  995. master->num_chipselect = num_cs;
  996. master->bus_num = bus_num++;
  997. if (of_get_property(node, "ti,pindir-d0-in-d1-out", NULL))
  998. mcspi->pin_dir = MCSPI_PINDIR_D0_IN_D1_OUT;
  999. } else {
  1000. pdata = pdev->dev.platform_data;
  1001. master->num_chipselect = pdata->num_cs;
  1002. if (pdev->id != -1)
  1003. master->bus_num = pdev->id;
  1004. mcspi->pin_dir = pdata->pin_dir;
  1005. }
  1006. regs_offset = pdata->regs_offset;
  1007. r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1008. if (r == NULL) {
  1009. status = -ENODEV;
  1010. goto free_master;
  1011. }
  1012. r->start += regs_offset;
  1013. r->end += regs_offset;
  1014. mcspi->phys = r->start;
  1015. mcspi->base = devm_request_and_ioremap(&pdev->dev, r);
  1016. if (!mcspi->base) {
  1017. dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
  1018. status = -ENOMEM;
  1019. goto free_master;
  1020. }
  1021. mcspi->dev = &pdev->dev;
  1022. INIT_LIST_HEAD(&mcspi->ctx.cs);
  1023. mcspi->dma_channels = kcalloc(master->num_chipselect,
  1024. sizeof(struct omap2_mcspi_dma),
  1025. GFP_KERNEL);
  1026. if (mcspi->dma_channels == NULL)
  1027. goto free_master;
  1028. for (i = 0; i < master->num_chipselect; i++) {
  1029. char dma_ch_name[14];
  1030. struct resource *dma_res;
  1031. sprintf(dma_ch_name, "rx%d", i);
  1032. dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
  1033. dma_ch_name);
  1034. if (!dma_res) {
  1035. dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
  1036. status = -ENODEV;
  1037. break;
  1038. }
  1039. mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
  1040. sprintf(dma_ch_name, "tx%d", i);
  1041. dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
  1042. dma_ch_name);
  1043. if (!dma_res) {
  1044. dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
  1045. status = -ENODEV;
  1046. break;
  1047. }
  1048. mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
  1049. }
  1050. if (status < 0)
  1051. goto dma_chnl_free;
  1052. pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
  1053. if (IS_ERR(pinctrl))
  1054. dev_warn(&pdev->dev,
  1055. "pins are not configured from the driver\n");
  1056. pm_runtime_use_autosuspend(&pdev->dev);
  1057. pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
  1058. pm_runtime_enable(&pdev->dev);
  1059. if (status || omap2_mcspi_master_setup(mcspi) < 0)
  1060. goto disable_pm;
  1061. status = spi_register_master(master);
  1062. if (status < 0)
  1063. goto disable_pm;
  1064. return status;
  1065. disable_pm:
  1066. pm_runtime_disable(&pdev->dev);
  1067. dma_chnl_free:
  1068. kfree(mcspi->dma_channels);
  1069. free_master:
  1070. spi_master_put(master);
  1071. return status;
  1072. }
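/*
 * Remove: drop the runtime PM reference, unregister the master and free
 * the per-chip-select DMA channel bookkeeping.
 */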
  1073. static int __devexit omap2_mcspi_remove(struct platform_device *pdev)
  1074. {
  1075. struct spi_master *master;
  1076. struct omap2_mcspi *mcspi;
  1077. struct omap2_mcspi_dma *dma_channels;
  1078. master = dev_get_drvdata(&pdev->dev);
  1079. mcspi = spi_master_get_devdata(master);
  1080. dma_channels = mcspi->dma_channels;
  1081. pm_runtime_put_sync(mcspi->dev);
  1082. pm_runtime_disable(&pdev->dev);
  1083. spi_unregister_master(master);
  1084. kfree(dma_channels);
  1085. return 0;
  1086. }
  1087. /* work with hotplug and coldplug */
  1088. MODULE_ALIAS("platform:omap2_mcspi");
  1089. #ifdef CONFIG_SUSPEND
  1090. /*
  1091. * When the McSPI controller wakes up from off-mode, CS is in the active
  1092. * state. If it was inactive when the driver was suspended, force it back
  1093. * to the inactive state at wake-up.
  1094. */
  1095. static int omap2_mcspi_resume(struct device *dev)
  1096. {
  1097. struct spi_master *master = dev_get_drvdata(dev);
  1098. struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
  1099. struct omap2_mcspi_regs *ctx = &mcspi->ctx;
  1100. struct omap2_mcspi_cs *cs;
  1101. pm_runtime_get_sync(mcspi->dev);
  1102. list_for_each_entry(cs, &ctx->cs, node) {
  1103. if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
  1104. /*
  1105. * We need to toggle the CS state for the OMAP to take this
  1106. * change into account.
  1107. */
  1108. cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
  1109. __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
  1110. cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
  1111. __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
  1112. }
  1113. }
  1114. pm_runtime_mark_last_busy(mcspi->dev);
  1115. pm_runtime_put_autosuspend(mcspi->dev);
  1116. return 0;
  1117. }
  1118. #else
  1119. #define omap2_mcspi_resume NULL
  1120. #endif
  1121. static const struct dev_pm_ops omap2_mcspi_pm_ops = {
  1122. .resume = omap2_mcspi_resume,
  1123. .runtime_resume = omap_mcspi_runtime_resume,
  1124. };
  1125. static struct platform_driver omap2_mcspi_driver = {
  1126. .driver = {
  1127. .name = "omap2_mcspi",
  1128. .owner = THIS_MODULE,
  1129. .pm = &omap2_mcspi_pm_ops,
  1130. .of_match_table = omap_mcspi_of_match,
  1131. },
  1132. .probe = omap2_mcspi_probe,
  1133. .remove = __devexit_p(omap2_mcspi_remove),
  1134. };
  1135. module_platform_driver(omap2_mcspi_driver);
  1136. MODULE_LICENSE("GPL");