spi_mpc8xxx.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402
  1. /*
  2. * MPC8xxx SPI controller driver.
  3. *
  4. * Maintainer: Kumar Gala
  5. *
  6. * Copyright (C) 2006 Polycom, Inc.
  7. *
  8. * CPM SPI and QE buffer descriptors mode support:
  9. * Copyright (c) 2009 MontaVista Software, Inc.
  10. * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
  11. *
  12. * This program is free software; you can redistribute it and/or modify it
  13. * under the terms of the GNU General Public License as published by the
  14. * Free Software Foundation; either version 2 of the License, or (at your
  15. * option) any later version.
  16. */
  17. #include <linux/module.h>
  18. #include <linux/init.h>
  19. #include <linux/types.h>
  20. #include <linux/kernel.h>
  21. #include <linux/bug.h>
  22. #include <linux/errno.h>
  23. #include <linux/err.h>
  24. #include <linux/io.h>
  25. #include <linux/completion.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/delay.h>
  28. #include <linux/irq.h>
  29. #include <linux/device.h>
  30. #include <linux/spi/spi.h>
  31. #include <linux/spi/spi_bitbang.h>
  32. #include <linux/platform_device.h>
  33. #include <linux/fsl_devices.h>
  34. #include <linux/dma-mapping.h>
  35. #include <linux/mm.h>
  36. #include <linux/mutex.h>
  37. #include <linux/of.h>
  38. #include <linux/of_platform.h>
  39. #include <linux/gpio.h>
  40. #include <linux/of_gpio.h>
  41. #include <linux/of_spi.h>
  42. #include <linux/slab.h>
  43. #include <sysdev/fsl_soc.h>
  44. #include <asm/cpm.h>
  45. #include <asm/qe.h>
  46. #include <asm/irq.h>
  47. /* CPM1 and CPM2 are mutually exclusive. */
  48. #ifdef CONFIG_CPM1
  49. #include <asm/cpm1.h>
  50. #define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
  51. #else
  52. #include <asm/cpm2.h>
  53. #define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
  54. #endif
/* SPI Controller registers (memory-mapped, big-endian) */
struct mpc8xxx_spi_reg {
	u8 res1[0x20];		/* 0x00-0x1f: reserved */
	__be32 mode;		/* 0x20: SPMODE - mode register */
	__be32 event;		/* 0x24: SPIE - event register */
	__be32 mask;		/* 0x28: SPIM - interrupt mask register */
	__be32 command;		/* 0x2c: SPCOM - command register */
	__be32 transmit;	/* 0x30: transmit data register */
	__be32 receive;		/* 0x34: receive data register */
};
/* SPI Parameter RAM layout in CPM/QE multi-user RAM (muram) */
struct spi_pram {
	__be16 rbase;	/* Rx Buffer descriptor base address */
	__be16 tbase;	/* Tx Buffer descriptor base address */
	u8 rfcr;	/* Rx function code */
	u8 tfcr;	/* Tx function code */
	__be16 mrblr;	/* Max receive buffer length */
	__be32 rstate;	/* Internal */
	__be32 rdp;	/* Internal */
	__be16 rbptr;	/* Internal */
	__be16 rbc;	/* Internal */
	__be32 rxtmp;	/* Internal */
	__be32 tstate;	/* Internal */
	__be32 tdp;	/* Internal */
	__be16 tbptr;	/* Internal */
	__be16 tbc;	/* Internal */
	__be32 txtmp;	/* Internal */
	__be32 res;	/* Reserved */
	__be16 rpbase;	/* Relocation pointer (CPM1 only) */
	__be16 res1;	/* Reserved */
};
/* SPI Controller mode register definitions */
#define	SPMODE_LOOP		(1 << 30)	/* Loopback mode */
#define	SPMODE_CI_INACTIVEHIGH	(1 << 29)	/* Clock inactive high (CPOL) */
#define	SPMODE_CP_BEGIN_EDGECLK	(1 << 28)	/* Clock phase (CPHA) */
#define	SPMODE_DIV16		(1 << 27)	/* Extra divide-by-16 prescale */
#define	SPMODE_REV		(1 << 26)	/* MSB-first (set when !SPI_LSB_FIRST) */
#define	SPMODE_MS		(1 << 25)	/* presumably master mode - confirm vs. manual */
#define	SPMODE_ENABLE		(1 << 24)	/* Enable the SPI unit */
#define	SPMODE_LEN(x)		((x) << 20)	/* Word length minus one */
#define	SPMODE_PM(x)		((x) << 16)	/* Prescale modulus */

#define	SPMODE_OP		(1 << 14)
#define	SPMODE_CG(x)		((x) << 7)

/*
 * Default for SPI Mode:
 *	SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk)
 */
#define	SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
			 SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))

/* SPIE register values (CPU mode) */
#define	SPIE_NE		0x00000200	/* Not empty */
#define	SPIE_NF		0x00000100	/* Not full */

/* SPIM register values */
#define	SPIM_NE		0x00000200	/* Not empty */
#define	SPIM_NF		0x00000100	/* Not full */

/* SPIE register values (CPM mode) */
#define	SPIE_TXB	0x00000200	/* Last char is written to tx fifo */
#define	SPIE_RXB	0x00000100	/* Last char is written to rx buf */

/* SPCOM register values */
#define	SPCOM_STR	(1 << 23)	/* Start transmit */

#define	SPI_PRAM_SIZE	0x100
#define	SPI_MRBLR	((unsigned int)PAGE_SIZE)
/* SPI Controller driver's private data. */
struct mpc8xxx_spi {
	struct device *dev;
	struct mpc8xxx_spi_reg __iomem *base;	/* mapped controller registers */

	/* rx & tx bufs from the spi_transfer */
	const void *tx;
	void *rx;

	int subblock;				/* QE subblock id (SPI1/SPI2) */
	struct spi_pram __iomem *pram;		/* parameter RAM (CPM mode) */
	struct cpm_buf_desc __iomem *tx_bd;	/* single tx buffer descriptor */
	struct cpm_buf_desc __iomem *rx_bd;	/* single rx buffer descriptor */

	struct spi_transfer *xfer_in_progress;	/* current CPM-mode transfer */

	/* dma addresses for CPM transfers */
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
	bool map_tx_dma;	/* true if we mapped tx_dma ourselves */
	bool map_rx_dma;	/* true if we mapped rx_dma ourselves */

	dma_addr_t dma_dummy_tx;	/* mapped zero page for tx-less xfers */
	dma_addr_t dma_dummy_rx;	/* mapped scratch buf for rx-less xfers */

	/* functions to deal with different sized buffers */
	void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
	u32(*get_tx) (struct mpc8xxx_spi *);

	unsigned int count;	/* words (CPU) / bytes (CPM) left to transfer */
	unsigned int irq;

	unsigned nsecs;		/* (clock cycle time)/2 */

	u32 spibrg;		/* SPIBRG input clock */
	u32 rx_shift;		/* RX data reg shift when in qe mode */
	u32 tx_shift;		/* TX data reg shift when in qe mode */

	unsigned int flags;	/* SPI_CPM_MODE / SPI_QE / SPI_CPM1 / ... */

	struct workqueue_struct *workqueue;	/* message pump */
	struct work_struct work;
	struct list_head queue;			/* pending spi_messages */
	spinlock_t lock;			/* protects queue */

	struct completion done;	/* signalled by the IRQ handler */
};
/* Shared scratch rx buffer used when a transfer supplies no rx_buf. */
static void *mpc8xxx_dummy_rx;
static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock);	/* protects buffer + refcount */
static int mpc8xxx_dummy_rx_refcnt;		/* controllers sharing the buffer */

/* Per chip-select state, hung off spi_device->controller_state. */
struct spi_mpc8xxx_cs {
	/* functions to deal with different sized buffers */
	void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
	u32 (*get_tx) (struct mpc8xxx_spi *);
	u32 rx_shift;		/* RX data reg shift when in qe mode */
	u32 tx_shift;		/* TX data reg shift when in qe mode */
	u32 hw_mode;		/* Holds HW mode register settings */
};
/* Write a (big-endian) controller register. */
static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val)
{
	out_be32(reg, val);
}

/* Read a (big-endian) controller register. */
static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
{
	return in_be32(reg);
}
/*
 * Generate a type-specific rx handler: take one word from the receive
 * data register (shifted right by rx_shift), store it into the rx
 * buffer and advance the buffer pointer.
 */
#define MPC83XX_SPI_RX_BUF(type)					  \
static									  \
void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
{									  \
	type *rx = mpc8xxx_spi->rx;					  \
	*rx++ = (type)(data >> mpc8xxx_spi->rx_shift);			  \
	mpc8xxx_spi->rx = rx;						  \
}

/*
 * Generate a type-specific tx handler: fetch the next word from the tx
 * buffer (0 when there is no tx buffer), shift it left into the
 * position the transmit register expects, and advance the pointer.
 */
#define MPC83XX_SPI_TX_BUF(type)				\
static								\
u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi)	\
{								\
	u32 data;						\
	const type *tx = mpc8xxx_spi->tx;			\
	if (!tx)						\
		return 0;					\
	data = *tx++ << mpc8xxx_spi->tx_shift;			\
	mpc8xxx_spi->tx = tx;					\
	return data;						\
}

MPC83XX_SPI_RX_BUF(u8)
MPC83XX_SPI_RX_BUF(u16)
MPC83XX_SPI_RX_BUF(u32)
MPC83XX_SPI_TX_BUF(u8)
MPC83XX_SPI_TX_BUF(u16)
MPC83XX_SPI_TX_BUF(u32)
/*
 * Program the per-device SPMODE value into the controller.
 *
 * The unit is briefly disabled while the mode word changes, so local
 * interrupts are masked to keep that window short.  In CPM mode the
 * tx/rx machinery must be reinitialized after SPMODE is rewritten.
 */
static void mpc8xxx_spi_change_mode(struct spi_device *spi)
{
	struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
	struct spi_mpc8xxx_cs *cs = spi->controller_state;
	__be32 __iomem *mode = &mspi->base->mode;
	unsigned long flags;

	/* Nothing to do if the hardware already holds the wanted mode. */
	if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
		return;

	/* Turn off IRQs locally to minimize time that SPI is disabled. */
	local_irq_save(flags);

	/* Turn off SPI unit prior changing mode */
	mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);
	mpc8xxx_spi_write_reg(mode, cs->hw_mode);

	/* When in CPM mode, we need to reinit tx and rx. */
	if (mspi->flags & SPI_CPM_MODE) {
		if (mspi->flags & SPI_QE) {
			qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
				     QE_CR_PROTOCOL_UNSPECIFIED, 0);
		} else {
			cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
			if (mspi->flags & SPI_CPM1) {
				/* CPM1: rewind BD pointers to the ring base. */
				out_be16(&mspi->pram->rbptr,
					 in_be16(&mspi->pram->rbase));
				out_be16(&mspi->pram->tbptr,
					 in_be16(&mspi->pram->tbase));
			}
		}
	}

	local_irq_restore(flags);
}
  226. static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
  227. {
  228. struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
  229. struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
  230. bool pol = spi->mode & SPI_CS_HIGH;
  231. struct spi_mpc8xxx_cs *cs = spi->controller_state;
  232. if (value == BITBANG_CS_INACTIVE) {
  233. if (pdata->cs_control)
  234. pdata->cs_control(spi, !pol);
  235. }
  236. if (value == BITBANG_CS_ACTIVE) {
  237. mpc8xxx_spi->rx_shift = cs->rx_shift;
  238. mpc8xxx_spi->tx_shift = cs->tx_shift;
  239. mpc8xxx_spi->get_rx = cs->get_rx;
  240. mpc8xxx_spi->get_tx = cs->get_tx;
  241. mpc8xxx_spi_change_mode(spi);
  242. if (pdata->cs_control)
  243. pdata->cs_control(spi, pol);
  244. }
  245. }
/*
 * Compute and apply per-transfer settings: word size handlers, shifts,
 * SPMODE word length and clock prescaler.  Called with @t == NULL to
 * apply the spi_device defaults.
 *
 * Returns 0 on success, -EINVAL for an unsupported word width.
 */
static
int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct mpc8xxx_spi *mpc8xxx_spi;
	u8 bits_per_word, pm;
	u32 hz;
	struct spi_mpc8xxx_cs *cs = spi->controller_state;

	mpc8xxx_spi = spi_master_get_devdata(spi->master);

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	} else {
		bits_per_word = 0;
		hz = 0;
	}

	/* spi_transfer level calls that work per-word */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/* Make sure its a bit width we support [4..16, 32] */
	if ((bits_per_word < 4)
	    || ((bits_per_word > 16) && (bits_per_word != 32)))
		return -EINVAL;

	if (!hz)
		hz = spi->max_speed_hz;

	/* Pick buffer handlers by word size; QE CPU mode needs the data
	 * left-aligned in the 32-bit registers, hence the shifts. */
	cs->rx_shift = 0;
	cs->tx_shift = 0;
	if (bits_per_word <= 8) {
		cs->get_rx = mpc8xxx_spi_rx_buf_u8;
		cs->get_tx = mpc8xxx_spi_tx_buf_u8;
		if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
			cs->rx_shift = 16;
			cs->tx_shift = 24;
		}
	} else if (bits_per_word <= 16) {
		cs->get_rx = mpc8xxx_spi_rx_buf_u16;
		cs->get_tx = mpc8xxx_spi_tx_buf_u16;
		if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
			cs->rx_shift = 16;
			cs->tx_shift = 16;
		}
	} else if (bits_per_word <= 32) {
		cs->get_rx = mpc8xxx_spi_rx_buf_u32;
		cs->get_tx = mpc8xxx_spi_tx_buf_u32;
	} else
		return -EINVAL;

	/* LSB-first on QE CPU mode uses different alignment. */
	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
	    spi->mode & SPI_LSB_FIRST) {
		cs->tx_shift = 0;
		if (bits_per_word <= 8)
			cs->rx_shift = 8;
		else
			cs->rx_shift = 0;
	}

	mpc8xxx_spi->rx_shift = cs->rx_shift;
	mpc8xxx_spi->tx_shift = cs->tx_shift;
	mpc8xxx_spi->get_rx = cs->get_rx;
	mpc8xxx_spi->get_tx = cs->get_tx;

	/* SPMODE_LEN holds length-1; 32 bits is encoded as 0. */
	if (bits_per_word == 32)
		bits_per_word = 0;
	else
		bits_per_word = bits_per_word - 1;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16
				  | SPMODE_PM(0xF));

	cs->hw_mode |= SPMODE_LEN(bits_per_word);

	/* Divider is 4*(pm+1), optionally times 16 via SPMODE_DIV16. */
	if ((mpc8xxx_spi->spibrg / hz) > 64) {
		cs->hw_mode |= SPMODE_DIV16;
		pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;

		WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
			  "Will use %d Hz instead.\n", dev_name(&spi->dev),
			  hz, mpc8xxx_spi->spibrg / 1024);
		if (pm > 16)
			pm = 16;
	} else
		pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
	if (pm)
		pm--;

	cs->hw_mode |= SPMODE_PM(pm);

	mpc8xxx_spi_change_mode(spi);
	return 0;
}
/*
 * Arm the tx/rx buffer descriptors for the next chunk (at most
 * SPI_MRBLR bytes) of the transfer in progress, then kick the
 * controller.  Called again from the IRQ handler until count reaches 0.
 */
static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
	struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
	struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
	unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
	unsigned int xfer_ofs;

	/* Offset of this chunk within the whole transfer. */
	xfer_ofs = mspi->xfer_in_progress->len - mspi->count;

	out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
	out_be16(&rx_bd->cbd_datlen, 0);
	out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);

	out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
	out_be16(&tx_bd->cbd_datlen, xfer_len);
	out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
				 BD_SC_LAST);

	/* start transfer */
	mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR);
}
/*
 * Prepare DMA mappings for a CPM-mode transfer and start it.
 *
 * Buffers already mapped by the caller (is_dma_mapped) are used as-is;
 * missing tx/rx buffers fall back to the pre-mapped dummy buffers;
 * everything else is mapped here (and recorded in map_tx_dma /
 * map_rx_dma so mpc8xxx_spi_cpm_bufs_complete() knows what to unmap).
 *
 * Returns 0 on success, -ENOMEM on a mapping failure.
 */
static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
				struct spi_transfer *t, bool is_dma_mapped)
{
	struct device *dev = mspi->dev;

	if (is_dma_mapped) {
		mspi->map_tx_dma = 0;
		mspi->map_rx_dma = 0;
	} else {
		mspi->map_tx_dma = 1;
		mspi->map_rx_dma = 1;
	}

	if (!t->tx_buf) {
		/* Transmit zeroes from the dummy page. */
		mspi->tx_dma = mspi->dma_dummy_tx;
		mspi->map_tx_dma = 0;
	}

	if (!t->rx_buf) {
		/* Discard incoming data into the dummy buffer. */
		mspi->rx_dma = mspi->dma_dummy_rx;
		mspi->map_rx_dma = 0;
	}

	if (mspi->map_tx_dma) {
		void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */

		mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mspi->tx_dma)) {
			dev_err(dev, "unable to map tx dma\n");
			return -ENOMEM;
		}
	} else {
		mspi->tx_dma = t->tx_dma;
	}

	if (mspi->map_rx_dma) {
		mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, mspi->rx_dma)) {
			dev_err(dev, "unable to map rx dma\n");
			goto err_rx_dma;
		}
	} else {
		mspi->rx_dma = t->rx_dma;
	}

	/* enable rx ints */
	mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB);

	mspi->xfer_in_progress = t;
	mspi->count = t->len;

	/* start CPM transfers */
	mpc8xxx_spi_cpm_bufs_start(mspi);

	return 0;

err_rx_dma:
	/* Undo the tx mapping made above before bailing out. */
	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
	return -ENOMEM;
}
  396. static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
  397. {
  398. struct device *dev = mspi->dev;
  399. struct spi_transfer *t = mspi->xfer_in_progress;
  400. if (mspi->map_tx_dma)
  401. dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
  402. if (mspi->map_tx_dma)
  403. dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
  404. mspi->xfer_in_progress = NULL;
  405. }
  406. static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
  407. struct spi_transfer *t, unsigned int len)
  408. {
  409. u32 word;
  410. mspi->count = len;
  411. /* enable rx ints */
  412. mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE);
  413. /* transmit word */
  414. word = mspi->get_tx(mspi);
  415. mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
  416. return 0;
  417. }
/*
 * Execute one spi_transfer synchronously: set up buffers, start the
 * hardware (CPM or CPU path) and sleep until the IRQ handler signals
 * completion.
 *
 * Returns the remaining count (0 on a fully completed transfer) or a
 * negative errno from setup.
 */
static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
			    bool is_dma_mapped)
{
	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
	unsigned int len = t->len;
	u8 bits_per_word;
	int ret;

	bits_per_word = spi->bits_per_word;
	if (t->bits_per_word)
		bits_per_word = t->bits_per_word;

	/* Convert the byte length into a word count; each step halves
	 * len and requires it to be even. */
	if (bits_per_word > 8) {
		/* invalid length? */
		if (len & 1)
			return -EINVAL;
		len /= 2;
	}
	if (bits_per_word > 16) {
		/* invalid length? */
		if (len & 1)
			return -EINVAL;
		len /= 2;
	}

	mpc8xxx_spi->tx = t->tx_buf;
	mpc8xxx_spi->rx = t->rx_buf;

	/* Reset the completion before the IRQ can fire. */
	INIT_COMPLETION(mpc8xxx_spi->done);

	if (mpc8xxx_spi->flags & SPI_CPM_MODE)
		ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
	else
		ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len);
	if (ret)
		return ret;

	wait_for_completion(&mpc8xxx_spi->done);

	/* disable rx ints */
	mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);

	if (mpc8xxx_spi->flags & SPI_CPM_MODE)
		mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi);

	return mpc8xxx_spi->count;
}
/*
 * Process one queued spi_message: walk its transfers, managing chip
 * select around them according to each transfer's cs_change flag, and
 * report the result through m->status / m->complete().
 */
static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
{
	struct spi_device *spi = m->spi;
	struct spi_transfer *t;
	unsigned int cs_change;
	const int nsecs = 50;	/* settle time around CS toggles */
	int status;

	cs_change = 1;		/* assert CS before the first transfer */
	status = 0;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->bits_per_word || t->speed_hz) {
			/* Don't allow changes if CS is active */
			status = -EINVAL;

			if (cs_change)
				status = mpc8xxx_spi_setup_transfer(spi, t);
			if (status < 0)
				break;
		}

		if (cs_change) {
			mpc8xxx_spi_chipselect(spi, BITBANG_CS_ACTIVE);
			ndelay(nsecs);
		}
		cs_change = t->cs_change;
		if (t->len)
			status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped);
		if (status) {
			/* Non-zero remainder means a short transfer. */
			status = -EMSGSIZE;
			break;
		}
		m->actual_length += t->len;

		if (t->delay_usecs)
			udelay(t->delay_usecs);

		if (cs_change) {
			ndelay(nsecs);
			mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
			ndelay(nsecs);
		}
	}

	m->status = status;
	m->complete(m->context);

	/* Drop CS unless the last transfer asked to keep it active. */
	if (status || !cs_change) {
		ndelay(nsecs);
		mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
	}

	/* Restore the device's default transfer settings. */
	mpc8xxx_spi_setup_transfer(spi, NULL);
}
  502. static void mpc8xxx_spi_work(struct work_struct *work)
  503. {
  504. struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi,
  505. work);
  506. spin_lock_irq(&mpc8xxx_spi->lock);
  507. while (!list_empty(&mpc8xxx_spi->queue)) {
  508. struct spi_message *m = container_of(mpc8xxx_spi->queue.next,
  509. struct spi_message, queue);
  510. list_del_init(&m->queue);
  511. spin_unlock_irq(&mpc8xxx_spi->lock);
  512. mpc8xxx_spi_do_one_msg(m);
  513. spin_lock_irq(&mpc8xxx_spi->lock);
  514. }
  515. spin_unlock_irq(&mpc8xxx_spi->lock);
  516. }
  517. static int mpc8xxx_spi_setup(struct spi_device *spi)
  518. {
  519. struct mpc8xxx_spi *mpc8xxx_spi;
  520. int retval;
  521. u32 hw_mode;
  522. struct spi_mpc8xxx_cs *cs = spi->controller_state;
  523. if (!spi->max_speed_hz)
  524. return -EINVAL;
  525. if (!cs) {
  526. cs = kzalloc(sizeof *cs, GFP_KERNEL);
  527. if (!cs)
  528. return -ENOMEM;
  529. spi->controller_state = cs;
  530. }
  531. mpc8xxx_spi = spi_master_get_devdata(spi->master);
  532. hw_mode = cs->hw_mode; /* Save orginal settings */
  533. cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
  534. /* mask out bits we are going to set */
  535. cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
  536. | SPMODE_REV | SPMODE_LOOP);
  537. if (spi->mode & SPI_CPHA)
  538. cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK;
  539. if (spi->mode & SPI_CPOL)
  540. cs->hw_mode |= SPMODE_CI_INACTIVEHIGH;
  541. if (!(spi->mode & SPI_LSB_FIRST))
  542. cs->hw_mode |= SPMODE_REV;
  543. if (spi->mode & SPI_LOOP)
  544. cs->hw_mode |= SPMODE_LOOP;
  545. retval = mpc8xxx_spi_setup_transfer(spi, NULL);
  546. if (retval < 0) {
  547. cs->hw_mode = hw_mode; /* Restore settings */
  548. return retval;
  549. }
  550. return 0;
  551. }
/*
 * CPM-mode interrupt path: one rx buffer descriptor has completed.
 * Account for the received chunk, then either arm the next chunk or
 * signal overall completion.
 */
static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
	u16 len;

	dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
		in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);

	len = in_be16(&mspi->rx_bd->cbd_datlen);
	if (len > mspi->count) {
		/* Hardware reported more data than we asked for. */
		WARN_ON(1);
		len = mspi->count;
	}

	/* Clear the events */
	mpc8xxx_spi_write_reg(&mspi->base->event, events);

	mspi->count -= len;
	if (mspi->count)
		mpc8xxx_spi_cpm_bufs_start(mspi);
	else
		complete(&mspi->done);
}
/*
 * CPU-mode (PIO) interrupt path: pull one word out of the receive
 * register, wait for the transmitter to drain if needed, then either
 * push the next word or signal completion.
 */
static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
{
	/* We need handle RX first */
	if (events & SPIE_NE) {
		u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive);

		if (mspi->rx)
			mspi->get_rx(rx_data, mspi);
	}

	if ((events & SPIE_NF) == 0)
		/* spin until TX is done */
		while (((events =
			mpc8xxx_spi_read_reg(&mspi->base->event)) &
						SPIE_NF) == 0)
			cpu_relax();

	/* Clear the events */
	mpc8xxx_spi_write_reg(&mspi->base->event, events);

	mspi->count -= 1;
	if (mspi->count) {
		u32 word = mspi->get_tx(mspi);

		mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
	} else {
		complete(&mspi->done);
	}
}
  594. static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data)
  595. {
  596. struct mpc8xxx_spi *mspi = context_data;
  597. irqreturn_t ret = IRQ_NONE;
  598. u32 events;
  599. /* Get interrupt events(tx/rx) */
  600. events = mpc8xxx_spi_read_reg(&mspi->base->event);
  601. if (events)
  602. ret = IRQ_HANDLED;
  603. dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);
  604. if (mspi->flags & SPI_CPM_MODE)
  605. mpc8xxx_spi_cpm_irq(mspi, events);
  606. else
  607. mpc8xxx_spi_cpu_irq(mspi, events);
  608. return ret;
  609. }
  610. static int mpc8xxx_spi_transfer(struct spi_device *spi,
  611. struct spi_message *m)
  612. {
  613. struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
  614. unsigned long flags;
  615. m->actual_length = 0;
  616. m->status = -EINPROGRESS;
  617. spin_lock_irqsave(&mpc8xxx_spi->lock, flags);
  618. list_add_tail(&m->queue, &mpc8xxx_spi->queue);
  619. queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work);
  620. spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags);
  621. return 0;
  622. }
  623. static void mpc8xxx_spi_cleanup(struct spi_device *spi)
  624. {
  625. kfree(spi->controller_state);
  626. }
  627. static void *mpc8xxx_spi_alloc_dummy_rx(void)
  628. {
  629. mutex_lock(&mpc8xxx_dummy_rx_lock);
  630. if (!mpc8xxx_dummy_rx)
  631. mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
  632. if (mpc8xxx_dummy_rx)
  633. mpc8xxx_dummy_rx_refcnt++;
  634. mutex_unlock(&mpc8xxx_dummy_rx_lock);
  635. return mpc8xxx_dummy_rx;
  636. }
  637. static void mpc8xxx_spi_free_dummy_rx(void)
  638. {
  639. mutex_lock(&mpc8xxx_dummy_rx_lock);
  640. switch (mpc8xxx_dummy_rx_refcnt) {
  641. case 0:
  642. WARN_ON(1);
  643. break;
  644. case 1:
  645. kfree(mpc8xxx_dummy_rx);
  646. mpc8xxx_dummy_rx = NULL;
  647. /* fall through */
  648. default:
  649. mpc8xxx_dummy_rx_refcnt--;
  650. break;
  651. }
  652. mutex_unlock(&mpc8xxx_dummy_rx_lock);
  653. }
/*
 * Locate or allocate the SPI parameter RAM in muram and return its
 * offset, or a negative errno encoded in the unsigned long (check with
 * IS_ERR_VALUE()).  Handles four hardware cases: QE with a fixed pram
 * address, QE with a dynamically assigned page, CPM2, and CPM1 (with
 * or without the microcode relocation patch).
 */
static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev_archdata_get_node(&dev->archdata);
	const u32 *iprop;
	int size;
	unsigned long spi_base_ofs;
	unsigned long pram_ofs = -ENOMEM;

	/* Can't use of_address_to_resource(), QE muram isn't at 0. */
	iprop = of_get_property(np, "reg", &size);

	/* QE with a fixed pram location? */
	if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
		return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);

	/* QE but with a dynamic pram location? */
	if (mspi->flags & SPI_QE) {
		/* NOTE(review): pram_ofs is not checked for allocation
		 * failure before being handed to qe_issue_cmd - the
		 * caller's IS_ERR_VALUE() check catches it afterwards. */
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
		return pram_ofs;
	}

	/* CPM1 and CPM2 pram must be at a fixed addr. */
	if (!iprop || size != sizeof(*iprop) * 4)
		return -ENOMEM;

	spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2);
	if (IS_ERR_VALUE(spi_base_ofs))
		return -ENOMEM;

	if (mspi->flags & SPI_CPM2) {
		/* CPM2: allocate pram anywhere and point the base word at it. */
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		if (!IS_ERR_VALUE(pram_ofs)) {
			u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs);

			out_be16(spi_base, pram_ofs);
		}
	} else {
		struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs);
		u16 rpbase = in_be16(&pram->rpbase);

		/* Microcode relocation patch applied? */
		if (rpbase)
			pram_ofs = rpbase;
		else
			/* Pram lives at the fixed base itself; keep it. */
			return spi_base_ofs;
	}

	cpm_muram_free(spi_base_ofs);
	return pram_ofs;
}
/*
 * One-time CPM/QE-mode initialization: resolve the QE subblock, claim
 * parameter RAM and buffer descriptors from muram, map the dummy tx/rx
 * DMA buffers and program the parameter RAM.  A no-op (returning 0)
 * when the controller is not in CPM mode.  Uses goto-based unwind on
 * failure; returns 0 or -ENOMEM.
 */
static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev_archdata_get_node(&dev->archdata);
	const u32 *iprop;
	int size;
	unsigned long pram_ofs;
	unsigned long bds_ofs;

	if (!(mspi->flags & SPI_CPM_MODE))
		return 0;

	if (!mpc8xxx_spi_alloc_dummy_rx())
		return -ENOMEM;

	if (mspi->flags & SPI_QE) {
		/* Map the device-tree cell-index onto a QE subblock id. */
		iprop = of_get_property(np, "cell-index", &size);
		if (iprop && size == sizeof(*iprop))
			mspi->subblock = *iprop;

		switch (mspi->subblock) {
		default:
			dev_warn(dev, "cell-index unspecified, assuming SPI1");
			/* fall through */
		case 0:
			mspi->subblock = QE_CR_SUBBLOCK_SPI1;
			break;
		case 1:
			mspi->subblock = QE_CR_SUBBLOCK_SPI2;
			break;
		}
	}

	pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi);
	if (IS_ERR_VALUE(pram_ofs)) {
		dev_err(dev, "can't allocate spi parameter ram\n");
		goto err_pram;
	}

	/* One tx BD plus one rx BD, back to back. */
	bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
				  sizeof(*mspi->rx_bd), 8);
	if (IS_ERR_VALUE(bds_ofs)) {
		dev_err(dev, "can't allocate bds\n");
		goto err_bds;
	}

	/* Dummy tx data comes from the kernel's zero page. */
	mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
					    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
		dev_err(dev, "unable to map dummy tx buffer\n");
		goto err_dummy_tx;
	}

	mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR,
					    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
		dev_err(dev, "unable to map dummy rx buffer\n");
		goto err_dummy_rx;
	}

	mspi->pram = cpm_muram_addr(pram_ofs);

	mspi->tx_bd = cpm_muram_addr(bds_ofs);
	mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));

	/* Initialize parameter ram. */
	out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
	out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
	out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
	out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
	out_be16(&mspi->pram->mrblr, SPI_MRBLR);
	out_be32(&mspi->pram->rstate, 0);
	out_be32(&mspi->pram->rdp, 0);
	out_be16(&mspi->pram->rbptr, 0);
	out_be16(&mspi->pram->rbc, 0);
	out_be32(&mspi->pram->rxtmp, 0);
	out_be32(&mspi->pram->tstate, 0);
	out_be32(&mspi->pram->tdp, 0);
	out_be16(&mspi->pram->tbptr, 0);
	out_be16(&mspi->pram->tbc, 0);
	out_be32(&mspi->pram->txtmp, 0);

	return 0;

err_dummy_rx:
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
err_dummy_tx:
	cpm_muram_free(bds_ofs);
err_bds:
	cpm_muram_free(pram_ofs);
err_pram:
	mpc8xxx_spi_free_dummy_rx();
	return -ENOMEM;
}
/* Undo mpc8xxx_spi_cpm_init(): unmap dummy buffers, release muram. */
static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;

	dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
	/* tx_bd is the start of the combined tx+rx BD allocation. */
	cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
	cpm_muram_free(cpm_muram_offset(mspi->pram));
	mpc8xxx_spi_free_dummy_rx();
}
  788. static const char *mpc8xxx_spi_strmode(unsigned int flags)
  789. {
  790. if (flags & SPI_QE_CPU_MODE) {
  791. return "QE CPU";
  792. } else if (flags & SPI_CPM_MODE) {
  793. if (flags & SPI_QE)
  794. return "QE";
  795. else if (flags & SPI_CPM2)
  796. return "CPM2";
  797. else
  798. return "CPM1";
  799. }
  800. return "CPU";
  801. }
  802. static struct spi_master * __devinit
  803. mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
  804. {
  805. struct fsl_spi_platform_data *pdata = dev->platform_data;
  806. struct spi_master *master;
  807. struct mpc8xxx_spi *mpc8xxx_spi;
  808. u32 regval;
  809. int ret = 0;
  810. master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
  811. if (master == NULL) {
  812. ret = -ENOMEM;
  813. goto err;
  814. }
  815. dev_set_drvdata(dev, master);
  816. /* the spi->mode bits understood by this driver: */
  817. master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
  818. | SPI_LSB_FIRST | SPI_LOOP;
  819. master->setup = mpc8xxx_spi_setup;
  820. master->transfer = mpc8xxx_spi_transfer;
  821. master->cleanup = mpc8xxx_spi_cleanup;
  822. mpc8xxx_spi = spi_master_get_devdata(master);
  823. mpc8xxx_spi->dev = dev;
  824. mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
  825. mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
  826. mpc8xxx_spi->flags = pdata->flags;
  827. mpc8xxx_spi->spibrg = pdata->sysclk;
  828. ret = mpc8xxx_spi_cpm_init(mpc8xxx_spi);
  829. if (ret)
  830. goto err_cpm_init;
  831. mpc8xxx_spi->rx_shift = 0;
  832. mpc8xxx_spi->tx_shift = 0;
  833. if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
  834. mpc8xxx_spi->rx_shift = 16;
  835. mpc8xxx_spi->tx_shift = 24;
  836. }
  837. init_completion(&mpc8xxx_spi->done);
  838. mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem));
  839. if (mpc8xxx_spi->base == NULL) {
  840. ret = -ENOMEM;
  841. goto err_ioremap;
  842. }
  843. mpc8xxx_spi->irq = irq;
  844. /* Register for SPI Interrupt */
  845. ret = request_irq(mpc8xxx_spi->irq, mpc8xxx_spi_irq,
  846. 0, "mpc8xxx_spi", mpc8xxx_spi);
  847. if (ret != 0)
  848. goto unmap_io;
  849. master->bus_num = pdata->bus_num;
  850. master->num_chipselect = pdata->max_chipselect;
  851. /* SPI controller initializations */
  852. mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, 0);
  853. mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);
  854. mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->command, 0);
  855. mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, 0xffffffff);
  856. /* Enable SPI interface */
  857. regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
  858. if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
  859. regval |= SPMODE_OP;
  860. mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval);
  861. spin_lock_init(&mpc8xxx_spi->lock);
  862. init_completion(&mpc8xxx_spi->done);
  863. INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work);
  864. INIT_LIST_HEAD(&mpc8xxx_spi->queue);
  865. mpc8xxx_spi->workqueue = create_singlethread_workqueue(
  866. dev_name(master->dev.parent));
  867. if (mpc8xxx_spi->workqueue == NULL) {
  868. ret = -EBUSY;
  869. goto free_irq;
  870. }
  871. ret = spi_register_master(master);
  872. if (ret < 0)
  873. goto unreg_master;
  874. dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base,
  875. mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
  876. return master;
  877. unreg_master:
  878. destroy_workqueue(mpc8xxx_spi->workqueue);
  879. free_irq:
  880. free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
  881. unmap_io:
  882. iounmap(mpc8xxx_spi->base);
  883. err_ioremap:
  884. mpc8xxx_spi_cpm_free(mpc8xxx_spi);
  885. err_cpm_init:
  886. spi_master_put(master);
  887. err:
  888. return ERR_PTR(ret);
  889. }
  890. static int __devexit mpc8xxx_spi_remove(struct device *dev)
  891. {
  892. struct mpc8xxx_spi *mpc8xxx_spi;
  893. struct spi_master *master;
  894. master = dev_get_drvdata(dev);
  895. mpc8xxx_spi = spi_master_get_devdata(master);
  896. flush_workqueue(mpc8xxx_spi->workqueue);
  897. destroy_workqueue(mpc8xxx_spi->workqueue);
  898. spi_unregister_master(master);
  899. free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
  900. iounmap(mpc8xxx_spi->base);
  901. mpc8xxx_spi_cpm_free(mpc8xxx_spi);
  902. return 0;
  903. }
/*
 * Per-device bookkeeping for the OF binding.  Embeds the platform data
 * so helpers can recover the GPIO tables from a
 * struct fsl_spi_platform_data pointer via container_of().
 */
struct mpc8xxx_spi_probe_info {
	struct fsl_spi_platform_data pdata;
	int *gpios;		/* one GPIO number per chip-select; -1 = unused */
	bool *alow_flags;	/* true if the matching chip-select is active-low */
};
  909. static struct mpc8xxx_spi_probe_info *
  910. to_of_pinfo(struct fsl_spi_platform_data *pdata)
  911. {
  912. return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
  913. }
  914. static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on)
  915. {
  916. struct device *dev = spi->dev.parent;
  917. struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
  918. u16 cs = spi->chip_select;
  919. int gpio = pinfo->gpios[cs];
  920. bool alow = pinfo->alow_flags[cs];
  921. gpio_set_value(gpio, on ^ alow);
  922. }
/*
 * Parse the GPIO properties of the controller node and claim one GPIO
 * per chip-select line.
 *
 * On success this fills in pinfo->gpios / pinfo->alow_flags, sets
 * pdata->max_chipselect and installs mpc8xxx_spi_cs_control() as the
 * cs_control hook.  A node without GPIOs is still valid: a single SPI
 * device with no dedicated chip-select line is permitted.
 *
 * Returns 0 or a negative errno; on failure every claimed GPIO is
 * released and both tables are freed and NULLed.
 */
static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev_archdata_get_node(&dev->archdata);
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
	unsigned int ngpios;
	int i = 0;
	int ret;

	ngpios = of_gpio_count(np);
	if (!ngpios) {
		/*
		 * SPI w/o chip-select line. One SPI device is still permitted
		 * though.
		 */
		pdata->max_chipselect = 1;
		return 0;
	}

	pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL);
	if (!pinfo->gpios)
		return -ENOMEM;
	/* 0xff-fill: every entry starts out as -1, i.e. "no GPIO claimed". */
	memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));

	pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags),
				    GFP_KERNEL);
	if (!pinfo->alow_flags) {
		ret = -ENOMEM;
		goto err_alloc_flags;
	}

	for (; i < ngpios; i++) {
		int gpio;
		enum of_gpio_flags flags;

		gpio = of_get_gpio_flags(np, i, &flags);
		if (!gpio_is_valid(gpio)) {
			dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
			/* gpio holds the negative errno from the OF lookup */
			ret = gpio;
			goto err_loop;
		}

		ret = gpio_request(gpio, dev_name(dev));
		if (ret) {
			dev_err(dev, "can't request gpio #%d: %d\n", i, ret);
			goto err_loop;
		}

		/* Record only after a successful request — see err_loop. */
		pinfo->gpios[i] = gpio;
		pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW;

		/*
		 * Initial level equals the active-low flag, i.e. the line is
		 * driven to its deasserted state (cf. mpc8xxx_spi_cs_control).
		 */
		ret = gpio_direction_output(pinfo->gpios[i],
					    pinfo->alow_flags[i]);
		if (ret) {
			dev_err(dev, "can't set output direction for gpio "
				"#%d: %d\n", i, ret);
			goto err_loop;
		}
	}

	pdata->max_chipselect = ngpios;
	pdata->cs_control = mpc8xxx_spi_cs_control;

	return 0;

err_loop:
	/*
	 * Walk back over slots 0..i.  Slot i may still hold -1 if the
	 * failure happened before gpio_request() succeeded; the
	 * gpio_is_valid() test filters that case out.
	 */
	while (i >= 0) {
		if (gpio_is_valid(pinfo->gpios[i]))
			gpio_free(pinfo->gpios[i]);
		i--;
	}

	kfree(pinfo->alow_flags);
	pinfo->alow_flags = NULL;
err_alloc_flags:
	kfree(pinfo->gpios);
	pinfo->gpios = NULL;
	return ret;
}
  990. static int of_mpc8xxx_spi_free_chipselects(struct device *dev)
  991. {
  992. struct fsl_spi_platform_data *pdata = dev->platform_data;
  993. struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
  994. int i;
  995. if (!pinfo->gpios)
  996. return 0;
  997. for (i = 0; i < pdata->max_chipselect; i++) {
  998. if (gpio_is_valid(pinfo->gpios[i]))
  999. gpio_free(pinfo->gpios[i]);
  1000. }
  1001. kfree(pinfo->gpios);
  1002. kfree(pinfo->alow_flags);
  1003. return 0;
  1004. }
  1005. static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev,
  1006. const struct of_device_id *ofid)
  1007. {
  1008. struct device *dev = &ofdev->dev;
  1009. struct device_node *np = ofdev->node;
  1010. struct mpc8xxx_spi_probe_info *pinfo;
  1011. struct fsl_spi_platform_data *pdata;
  1012. struct spi_master *master;
  1013. struct resource mem;
  1014. struct resource irq;
  1015. const void *prop;
  1016. int ret = -ENOMEM;
  1017. pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
  1018. if (!pinfo)
  1019. return -ENOMEM;
  1020. pdata = &pinfo->pdata;
  1021. dev->platform_data = pdata;
  1022. /* Allocate bus num dynamically. */
  1023. pdata->bus_num = -1;
  1024. /* SPI controller is either clocked from QE or SoC clock. */
  1025. pdata->sysclk = get_brgfreq();
  1026. if (pdata->sysclk == -1) {
  1027. pdata->sysclk = fsl_get_sys_freq();
  1028. if (pdata->sysclk == -1) {
  1029. ret = -ENODEV;
  1030. goto err_clk;
  1031. }
  1032. }
  1033. prop = of_get_property(np, "mode", NULL);
  1034. if (prop && !strcmp(prop, "cpu-qe"))
  1035. pdata->flags = SPI_QE_CPU_MODE;
  1036. else if (prop && !strcmp(prop, "qe"))
  1037. pdata->flags = SPI_CPM_MODE | SPI_QE;
  1038. else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
  1039. pdata->flags = SPI_CPM_MODE | SPI_CPM2;
  1040. else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
  1041. pdata->flags = SPI_CPM_MODE | SPI_CPM1;
  1042. ret = of_mpc8xxx_spi_get_chipselects(dev);
  1043. if (ret)
  1044. goto err;
  1045. ret = of_address_to_resource(np, 0, &mem);
  1046. if (ret)
  1047. goto err;
  1048. ret = of_irq_to_resource(np, 0, &irq);
  1049. if (!ret) {
  1050. ret = -EINVAL;
  1051. goto err;
  1052. }
  1053. master = mpc8xxx_spi_probe(dev, &mem, irq.start);
  1054. if (IS_ERR(master)) {
  1055. ret = PTR_ERR(master);
  1056. goto err;
  1057. }
  1058. of_register_spi_devices(master, np);
  1059. return 0;
  1060. err:
  1061. of_mpc8xxx_spi_free_chipselects(dev);
  1062. err_clk:
  1063. kfree(pinfo);
  1064. return ret;
  1065. }
  1066. static int __devexit of_mpc8xxx_spi_remove(struct of_device *ofdev)
  1067. {
  1068. int ret;
  1069. ret = mpc8xxx_spi_remove(&ofdev->dev);
  1070. if (ret)
  1071. return ret;
  1072. of_mpc8xxx_spi_free_chipselects(&ofdev->dev);
  1073. return 0;
  1074. }
/* Device-tree match table: bind to any node compatible with "fsl,spi". */
static const struct of_device_id of_mpc8xxx_spi_match[] = {
	{ .compatible = "fsl,spi" },
	{},
};
MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match);
/* OpenFirmware (device-tree) driver binding. */
static struct of_platform_driver of_mpc8xxx_spi_driver = {
	.name = "mpc8xxx_spi",
	.match_table = of_mpc8xxx_spi_match,
	.probe = of_mpc8xxx_spi_probe,
	.remove = __devexit_p(of_mpc8xxx_spi_remove),
};
  1086. #ifdef CONFIG_MPC832x_RDB
  1087. /*
  1088. * XXX XXX XXX
  1089. * This is "legacy" platform driver, was used by the MPC8323E-RDB boards
  1090. * only. The driver should go away soon, since newer MPC8323E-RDB's device
  1091. * tree can work with OpenFirmware driver. But for now we support old trees
  1092. * as well.
  1093. */
  1094. static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
  1095. {
  1096. struct resource *mem;
  1097. int irq;
  1098. struct spi_master *master;
  1099. if (!pdev->dev.platform_data)
  1100. return -EINVAL;
  1101. mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1102. if (!mem)
  1103. return -EINVAL;
  1104. irq = platform_get_irq(pdev, 0);
  1105. if (irq <= 0)
  1106. return -EINVAL;
  1107. master = mpc8xxx_spi_probe(&pdev->dev, mem, irq);
  1108. if (IS_ERR(master))
  1109. return PTR_ERR(master);
  1110. return 0;
  1111. }
  1112. static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
  1113. {
  1114. return mpc8xxx_spi_remove(&pdev->dev);
  1115. }
MODULE_ALIAS("platform:mpc8xxx_spi");
/* Legacy (non-device-tree) platform driver binding. */
static struct platform_driver mpc8xxx_spi_driver = {
	.probe = plat_mpc8xxx_spi_probe,
	.remove = __devexit_p(plat_mpc8xxx_spi_remove),
	.driver = {
		.name = "mpc8xxx_spi",
		.owner = THIS_MODULE,
	},
};
/* Nonzero iff platform_driver_register() failed, so unregister is skipped. */
static bool legacy_driver_failed;

/* Register the legacy platform driver; failure is recorded, not fatal. */
static void __init legacy_driver_register(void)
{
	legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver);
}

/* Unregister the legacy platform driver unless its registration failed. */
static void __exit legacy_driver_unregister(void)
{
	if (legacy_driver_failed)
		return;

	platform_driver_unregister(&mpc8xxx_spi_driver);
}
#else
/* No legacy MPC8323E-RDB support built in: register/unregister are no-ops. */
static void __init legacy_driver_register(void) {}
static void __exit legacy_driver_unregister(void) {}
#endif /* CONFIG_MPC832x_RDB */
/* Module init: register the legacy driver (if built) and the OF driver. */
static int __init mpc8xxx_spi_init(void)
{
	legacy_driver_register();
	return of_register_platform_driver(&of_mpc8xxx_spi_driver);
}

/* Module exit: unregister in reverse order of registration. */
static void __exit mpc8xxx_spi_exit(void)
{
	of_unregister_platform_driver(&of_mpc8xxx_spi_driver);
	legacy_driver_unregister();
}

module_init(mpc8xxx_spi_init);
module_exit(mpc8xxx_spi_exit);

MODULE_AUTHOR("Kumar Gala");
MODULE_DESCRIPTION("Simple MPC8xxx SPI Driver");
MODULE_LICENSE("GPL");