spi_bfin5xx.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326
  1. /*
  2. * File: drivers/spi/bfin5xx_spi.c
  3. * Based on: N/A
  4. * Author: Luke Yang (Analog Devices Inc.)
  5. *
  6. * Created: March. 10th 2006
  7. * Description: SPI controller driver for Blackfin 5xx
  8. * Bugs: Enter bugs at http://blackfin.uclinux.org/
  9. *
  10. * Modified:
  11. * March 10, 2006 bfin5xx_spi.c Created. (Luke Yang)
  12. * August 7, 2006 added full duplex mode (Axel Weiss & Luke Yang)
  13. *
  14. * Copyright 2004-2006 Analog Devices Inc.
  15. *
  16. * This program is free software ; you can redistribute it and/or modify
  17. * it under the terms of the GNU General Public License as published by
  18. * the Free Software Foundation ; either version 2, or (at your option)
  19. * any later version.
  20. *
  21. * This program is distributed in the hope that it will be useful,
  22. * but WITHOUT ANY WARRANTY ; without even the implied warranty of
  23. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  24. * GNU General Public License for more details.
  25. *
  26. * You should have received a copy of the GNU General Public License
  27. * along with this program ; see the file COPYING.
  28. * If not, write to the Free Software Foundation,
  29. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  30. */
  31. #include <linux/init.h>
  32. #include <linux/module.h>
  33. #include <linux/device.h>
  34. #include <linux/ioport.h>
  35. #include <linux/errno.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/platform_device.h>
  38. #include <linux/dma-mapping.h>
  39. #include <linux/spi/spi.h>
  40. #include <linux/workqueue.h>
  41. #include <linux/delay.h>
  42. #include <asm/io.h>
  43. #include <asm/irq.h>
  44. #include <asm/delay.h>
  45. #include <asm/dma.h>
  46. #include <asm/bfin5xx_spi.h>
  47. MODULE_AUTHOR("Luke Yang");
  48. MODULE_DESCRIPTION("Blackfin 5xx SPI Contoller");
  49. MODULE_LICENSE("GPL");
/* DMA buffers must be 8-byte aligned for the Blackfin DMA engine */
#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)

/*
 * Generate read_<reg>()/write_<reg>() accessors for a 16-bit SPI MMIO
 * register at SPI0_REGBASE + off.  Each write is followed by SSYNC()
 * so the store reaches the peripheral before execution continues.
 */
#define DEFINE_SPI_REG(reg, off) \
static inline u16 read_##reg(void) \
{ return *(volatile unsigned short*)(SPI0_REGBASE + off); } \
static inline void write_##reg(u16 v) \
{*(volatile unsigned short*)(SPI0_REGBASE + off) = v;\
SSYNC();}

DEFINE_SPI_REG(CTRL, 0x00)	/* control register */
DEFINE_SPI_REG(FLAG, 0x04)	/* chip-select flag register */
DEFINE_SPI_REG(STAT, 0x08)	/* status register */
DEFINE_SPI_REG(TDBR, 0x0C)	/* transmit data buffer */
DEFINE_SPI_REG(RDBR, 0x10)	/* receive data buffer (read kicks off a transfer) */
DEFINE_SPI_REG(BAUD, 0x14)	/* baud rate divisor */
DEFINE_SPI_REG(SHAW, 0x18)	/* shadow of RDBR; readers use it for the last
				 * word so no extra transfer is clocked —
				 * see u8_reader()/u16_reader() */
/* Tokens stored in spi_message->state to track per-message progress */
#define START_STATE ((void*)0)
#define RUNNING_STATE ((void*)1)
#define DONE_STATE ((void*)2)
#define ERROR_STATE ((void*)-1)

/* Values for driver_data->run: whether the message queue accepts work */
#define QUEUE_RUNNING 0
#define QUEUE_STOPPED 1

/*
 * NOTE(review): file-scope, non-static and not referenced in this chunk;
 * presumably used elsewhere in the file — if not, make it static or drop it.
 */
int dma_requested;
/*
 * Per-controller driver state: SPI framework hookup, the message queue
 * worker, and the cursor for the transfer currently being pumped.
 */
struct driver_data {
	/* Driver model hookup */
	struct platform_device *pdev;

	/* SPI framework hookup */
	struct spi_master *master;

	/* BFIN hookup */
	struct bfin5xx_spi_master *master_info;

	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct pump_messages;
	spinlock_t lock;		/* guards queue/run/busy/cur_msg */
	struct list_head queue;		/* pending spi_message list */
	int busy;			/* pump currently processing a msg */
	int run;			/* QUEUE_RUNNING or QUEUE_STOPPED */

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message transfer state info */
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;
	size_t len_in_bytes;		/* current transfer length in bytes */
	size_t len;			/* length in words (bytes/2 in 16-bit mode) */
	void *tx;			/* tx cursor, NULL for rx-only */
	void *tx_end;
	void *rx;			/* rx cursor, NULL for tx-only */
	void *rx_end;
	int dma_mapped;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;		/* 0xFFFF selects autobuffer DMA mode */
	size_t rx_map_len;
	size_t tx_map_len;
	u8 n_bytes;			/* bytes per word for PIO stride */

	/* PIO handlers selected per transfer from chip_data */
	void (*write) (struct driver_data *);
	void (*read) (struct driver_data *);
	void (*duplex) (struct driver_data *);
};
/* Per-slave-device configuration, cached from bfin5xx_spi_chip at setup() */
struct chip_data {
	u16 ctl_reg;		/* SPI_CTL value for this device */
	u16 baud;		/* default SPI_BAUD divisor */
	u16 flag;		/* SPI_FLAG value asserting this chip select */
	u8 chip_select_num;
	u8 n_bytes;		/* bytes per word: 1 or 2 */
	u8 width;		/* 0 or 1 */
	u8 enable_dma;
	u8 bits_per_word;	/* 8 or 16 */
	u8 cs_change_per_word;	/* toggle CS around every word */
	u8 cs_chg_udelay;	/* usec delay while CS deasserted between words */

	/* PIO handlers matching this device's word size / CS mode */
	void (*write) (struct driver_data *);
	void (*read) (struct driver_data *);
	void (*duplex) (struct driver_data *);
};
  122. static void bfin_spi_enable(struct driver_data *drv_data)
  123. {
  124. u16 cr;
  125. cr = read_CTRL();
  126. write_CTRL(cr | BIT_CTL_ENABLE);
  127. SSYNC();
  128. }
  129. static void bfin_spi_disable(struct driver_data *drv_data)
  130. {
  131. u16 cr;
  132. cr = read_CTRL();
  133. write_CTRL(cr & (~BIT_CTL_ENABLE));
  134. SSYNC();
  135. }
  136. /* Caculate the SPI_BAUD register value based on input HZ */
  137. static u16 hz_to_spi_baud(u32 speed_hz)
  138. {
  139. u_long sclk = get_sclk();
  140. u16 spi_baud = (sclk / (2 * speed_hz));
  141. if ((sclk % (2 * speed_hz)) > 0)
  142. spi_baud++;
  143. return spi_baud;
  144. }
  145. static int flush(struct driver_data *drv_data)
  146. {
  147. unsigned long limit = loops_per_jiffy << 1;
  148. /* wait for stop and clear stat */
  149. while (!(read_STAT() & BIT_STAT_SPIF) && limit--)
  150. continue;
  151. write_STAT(BIT_STAT_CLR);
  152. return limit;
  153. }
/* stop controller and re-config current chip*/
static void restore_state(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	/* Clear status and disable clock */
	write_STAT(BIT_STAT_CLR);
	bfin_spi_disable(drv_data);
	dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");

#if defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
	/*
	 * On BF534/6/7 the SPI select lines are multiplexed with GPIO,
	 * so route the pins for the chip select in use.  The PORTF_FER /
	 * PORT_MUX masks below are board pin-mux magic — presumably taken
	 * from the hardware reference manual; verify against the HRM
	 * before changing.
	 */
	dev_dbg(&drv_data->pdev->dev,
		"chip select number is %d\n", chip->chip_select_num);
	switch (chip->chip_select_num) {
	case 1:
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3c00);
		SSYNC();
		break;

	case 2:
	case 3:
		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJSE_SPI);
		SSYNC();
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);
		SSYNC();
		break;

	case 4:
		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS4E_SPI);
		SSYNC();
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3840);
		SSYNC();
		break;

	case 5:
		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS5E_SPI);
		SSYNC();
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3820);
		SSYNC();
		break;

	case 6:
		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS6E_SPI);
		SSYNC();
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3810);
		SSYNC();
		break;

	case 7:
		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJCE_SPI);
		SSYNC();
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);
		SSYNC();
		break;
	}
#endif

	/* Load the registers */
	write_CTRL(chip->ctl_reg);
	write_BAUD(chip->baud);
	write_FLAG(chip->flag);
}
  208. /* used to kick off transfer in rx mode */
  209. static unsigned short dummy_read(void)
  210. {
  211. unsigned short tmp;
  212. tmp = read_RDBR();
  213. return tmp;
  214. }
  215. static void null_writer(struct driver_data *drv_data)
  216. {
  217. u8 n_bytes = drv_data->n_bytes;
  218. while (drv_data->tx < drv_data->tx_end) {
  219. write_TDBR(0);
  220. while ((read_STAT() & BIT_STAT_TXS))
  221. continue;
  222. drv_data->tx += n_bytes;
  223. }
  224. }
  225. static void null_reader(struct driver_data *drv_data)
  226. {
  227. u8 n_bytes = drv_data->n_bytes;
  228. dummy_read();
  229. while (drv_data->rx < drv_data->rx_end) {
  230. while (!(read_STAT() & BIT_STAT_RXS))
  231. continue;
  232. dummy_read();
  233. drv_data->rx += n_bytes;
  234. }
  235. }
  236. static void u8_writer(struct driver_data *drv_data)
  237. {
  238. dev_dbg(&drv_data->pdev->dev,
  239. "cr8-s is 0x%x\n", read_STAT());
  240. while (drv_data->tx < drv_data->tx_end) {
  241. write_TDBR(*(u8 *) (drv_data->tx));
  242. while (read_STAT() & BIT_STAT_TXS)
  243. continue;
  244. ++drv_data->tx;
  245. }
  246. /* poll for SPI completion before returning */
  247. while (!(read_STAT() & BIT_STAT_SPIF))
  248. continue;
  249. }
/*
 * 8-bit PIO write that toggles the chip select around every byte,
 * optionally waiting cs_chg_udelay microseconds while CS is deasserted.
 */
static void u8_cs_chg_writer(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->tx < drv_data->tx_end) {
		/* assert CS for this byte */
		write_FLAG(chip->flag);
		SSYNC();

		write_TDBR(*(u8 *) (drv_data->tx));
		/* wait for the byte to leave the transmit buffer ... */
		while (read_STAT() & BIT_STAT_TXS)
			continue;
		/* ... and for the shift to complete */
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;

		/* deassert CS (0xFF00 in the upper byte presumably
		 * releases all select lines — verify against the HRM) */
		write_FLAG(0xFF00 | chip->flag);
		SSYNC();
		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		++drv_data->tx;
	}
	/* leave all chip selects deasserted */
	write_FLAG(0xFF00);
	SSYNC();
}
/*
 * 8-bit PIO read.  Reading RDBR kicks off the next word's clocks, so
 * the final byte is fetched from the SHAW shadow register instead to
 * avoid clocking one transfer too many.
 */
static void u8_reader(struct driver_data *drv_data)
{
	dev_dbg(&drv_data->pdev->dev,
		"cr-8 is 0x%x\n", read_STAT());

	/* clear TDBR buffer before read(else it will be shifted out) */
	write_TDBR(0xFFFF);

	/* kick off the first transfer */
	dummy_read();

	while (drv_data->rx < drv_data->rx_end - 1) {
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u8 *) (drv_data->rx) = read_RDBR();
		++drv_data->rx;
	}

	/* last byte comes from the shadow register: no extra transfer */
	while (!(read_STAT() & BIT_STAT_RXS))
		continue;
	*(u8 *) (drv_data->rx) = read_SHAW();
	++drv_data->rx;
}
/*
 * 8-bit PIO read that toggles the chip select around every byte.
 * Each RDBR read kicks off one transfer; the data is then taken from
 * the SHAW shadow register so no further transfer is triggered.
 */
static void u8_cs_chg_reader(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->rx < drv_data->rx_end) {
		/* assert CS for this byte */
		write_FLAG(chip->flag);
		SSYNC();

		read_RDBR();	/* kick off */
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		*(u8 *) (drv_data->rx) = read_SHAW();

		/* deassert CS, optionally pausing between bytes */
		write_FLAG(0xFF00 | chip->flag);
		SSYNC();
		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		++drv_data->rx;
	}
	/* leave all chip selects deasserted */
	write_FLAG(0xFF00);
	SSYNC();
}
/* 8-bit full-duplex PIO: each TDBR write clocks one byte out and in. */
static void u8_duplex(struct driver_data *drv_data)
{
	/* in duplex mode, clk is triggered by writing of TDBR */
	while (drv_data->rx < drv_data->rx_end) {
		write_TDBR(*(u8 *) (drv_data->tx));
		/* wait for the shift to finish, then for rx data */
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u8 *) (drv_data->rx) = read_RDBR();
		++drv_data->rx;
		++drv_data->tx;
	}
}
/*
 * 8-bit full-duplex PIO with chip-select toggled around every byte,
 * optionally delaying cs_chg_udelay microseconds between bytes.
 */
static void u8_cs_chg_duplex(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->rx < drv_data->rx_end) {
		/* assert CS for this byte */
		write_FLAG(chip->flag);
		SSYNC();

		/* writing TDBR triggers the clock in duplex mode */
		write_TDBR(*(u8 *) (drv_data->tx));
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u8 *) (drv_data->rx) = read_RDBR();

		/* deassert CS */
		write_FLAG(0xFF00 | chip->flag);
		SSYNC();
		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		++drv_data->rx;
		++drv_data->tx;
	}
	/* leave all chip selects deasserted */
	write_FLAG(0xFF00);
	SSYNC();
}
  345. static void u16_writer(struct driver_data *drv_data)
  346. {
  347. dev_dbg(&drv_data->pdev->dev,
  348. "cr16 is 0x%x\n", read_STAT());
  349. while (drv_data->tx < drv_data->tx_end) {
  350. write_TDBR(*(u16 *) (drv_data->tx));
  351. while ((read_STAT() & BIT_STAT_TXS))
  352. continue;
  353. drv_data->tx += 2;
  354. }
  355. /* poll for SPI completion before returning */
  356. while (!(read_STAT() & BIT_STAT_SPIF))
  357. continue;
  358. }
/*
 * 16-bit PIO write that toggles the chip select around every word,
 * optionally waiting cs_chg_udelay microseconds while CS is deasserted.
 */
static void u16_cs_chg_writer(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->tx < drv_data->tx_end) {
		/* assert CS for this word */
		write_FLAG(chip->flag);
		SSYNC();

		write_TDBR(*(u16 *) (drv_data->tx));
		/* wait for the word to leave the transmit buffer ... */
		while ((read_STAT() & BIT_STAT_TXS))
			continue;
		/* ... and for the shift to complete */
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;

		/* deassert CS */
		write_FLAG(0xFF00 | chip->flag);
		SSYNC();
		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		drv_data->tx += 2;
	}
	/* leave all chip selects deasserted */
	write_FLAG(0xFF00);
	SSYNC();
}
/*
 * 16-bit PIO read.  As in u8_reader, reading RDBR triggers the next
 * transfer, so the last word is taken from the SHAW shadow register.
 */
static void u16_reader(struct driver_data *drv_data)
{
	dev_dbg(&drv_data->pdev->dev,
		"cr-16 is 0x%x\n", read_STAT());

	/* kick off the first transfer */
	dummy_read();

	while (drv_data->rx < (drv_data->rx_end - 2)) {
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u16 *) (drv_data->rx) = read_RDBR();
		drv_data->rx += 2;
	}

	/* last word comes from the shadow register: no extra transfer */
	while (!(read_STAT() & BIT_STAT_RXS))
		continue;
	*(u16 *) (drv_data->rx) = read_SHAW();
	drv_data->rx += 2;
}
/*
 * 16-bit PIO read that toggles the chip select around every word.
 * Each RDBR read kicks off one transfer; data is then read from SHAW.
 */
static void u16_cs_chg_reader(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->rx < drv_data->rx_end) {
		/* assert CS for this word */
		write_FLAG(chip->flag);
		SSYNC();

		read_RDBR();	/* kick off */
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		*(u16 *) (drv_data->rx) = read_SHAW();

		/* deassert CS, optionally pausing between words */
		write_FLAG(0xFF00 | chip->flag);
		SSYNC();
		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		drv_data->rx += 2;
	}
	/* leave all chip selects deasserted */
	write_FLAG(0xFF00);
	SSYNC();
}
/* 16-bit full-duplex PIO: each TDBR write clocks one word out and in. */
static void u16_duplex(struct driver_data *drv_data)
{
	/* in duplex mode, clk is triggered by writing of TDBR */
	while (drv_data->tx < drv_data->tx_end) {
		write_TDBR(*(u16 *) (drv_data->tx));
		/* wait for the shift to finish, then for rx data */
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u16 *) (drv_data->rx) = read_RDBR();
		drv_data->rx += 2;
		drv_data->tx += 2;
	}
}
/*
 * 16-bit full-duplex PIO with chip-select toggled around every word,
 * optionally delaying cs_chg_udelay microseconds between words.
 */
static void u16_cs_chg_duplex(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->tx < drv_data->tx_end) {
		/* assert CS for this word */
		write_FLAG(chip->flag);
		SSYNC();

		/* writing TDBR triggers the clock in duplex mode */
		write_TDBR(*(u16 *) (drv_data->tx));
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u16 *) (drv_data->rx) = read_RDBR();

		/* deassert CS */
		write_FLAG(0xFF00 | chip->flag);
		SSYNC();
		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		drv_data->rx += 2;
		drv_data->tx += 2;
	}
	/* leave all chip selects deasserted */
	write_FLAG(0xFF00);
	SSYNC();
}
  452. /* test if ther is more transfer to be done */
  453. static void *next_transfer(struct driver_data *drv_data)
  454. {
  455. struct spi_message *msg = drv_data->cur_msg;
  456. struct spi_transfer *trans = drv_data->cur_transfer;
  457. /* Move to next transfer */
  458. if (trans->transfer_list.next != &msg->transfers) {
  459. drv_data->cur_transfer =
  460. list_entry(trans->transfer_list.next,
  461. struct spi_transfer, transfer_list);
  462. return RUNNING_STATE;
  463. } else
  464. return DONE_STATE;
  465. }
/*
 * caller already set message->status;
 * dma and pio irqs are blocked give finished message back
 */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer *last_transfer;
	unsigned long flags;
	struct spi_message *msg;

	/* detach the finished message and restart the message pump */
	spin_lock_irqsave(&drv_data->lock, flags);
	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	queue_work(drv_data->workqueue, &drv_data->pump_messages);
	spin_unlock_irqrestore(&drv_data->lock, flags);

	/* NOTE(review): last_transfer is computed but never used below */
	last_transfer = list_entry(msg->transfers.prev,
				   struct spi_transfer, transfer_list);

	msg->state = NULL;

	/* disable chip select signal. And not stop spi in autobuffer mode */
	if (drv_data->tx_dma != 0xFFFF) {
		write_FLAG(0xFF00);
		bfin_spi_disable(drv_data);
	}

	/* notify the submitter; msg may be freed by the callback */
	if (msg->complete)
		msg->complete(msg->context);
}
/*
 * DMA completion interrupt: wait for the controller to finish shifting,
 * stop SPI, account the transfer and schedule the next one.
 */
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct driver_data *drv_data = (struct driver_data *)dev_id;
	struct spi_message *msg = drv_data->cur_msg;

	dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler\n");
	clear_dma_irqstat(CH_SPI);

	/* Wait for DMA to complete */
	while (get_dma_curr_irqstat(CH_SPI) & DMA_RUN)
		continue;

	/*
	 * wait for the last transaction shifted out. HRM states:
	 * at this point there may still be data in the SPI DMA FIFO waiting
	 * to be transmitted ... software needs to poll TXS in the SPI_STAT
	 * register until it goes low for 2 successive reads
	 */
	if (drv_data->tx != NULL) {
		while ((bfin_read_SPI_STAT() & TXS) ||
		       (bfin_read_SPI_STAT() & TXS))
			continue;
	}

	while (!(bfin_read_SPI_STAT() & SPIF))
		continue;

	bfin_spi_disable(drv_data);

	msg->actual_length += drv_data->len_in_bytes;

	/* Move to next transfer */
	msg->state = next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);

	/* free the irq handler before next transfer */
	dev_dbg(&drv_data->pdev->dev,
		"disable dma channel irq%d\n",
		CH_SPI);
	dma_disable_irq(CH_SPI);

	return IRQ_HANDLED;
}
/*
 * Tasklet body: execute one spi_transfer of the current message.
 *
 * Handles message termination (DONE/ERROR -> giveback()), inter-transfer
 * delay, per-transfer speed/CS setup, and then runs the transfer either
 * via DMA (including a 0xFFFF-tx_dma "autobuffer" hack) or via the PIO
 * read/write/duplex handlers selected from the chip data.
 */
static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	u8 width;
	u16 cr, dma_width, dma_config;
	u32 tranf_success = 1;

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/*
	 * if msg is error or done, report it back using complete() callback
	 */

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
				      struct spi_transfer, transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	}

	/* Setup the transfer state based on the type of transfer */
	if (flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* set up tx/rx cursors; NULL means that direction is unused */
	if (transfer->tx_buf != NULL) {
		drv_data->tx = (void *)transfer->tx_buf;
		drv_data->tx_end = drv_data->tx + transfer->len;
		dev_dbg(&drv_data->pdev->dev, "tx_buf is %p, tx_end is %p\n",
			transfer->tx_buf, drv_data->tx_end);
	} else {
		drv_data->tx = NULL;
	}

	if (transfer->rx_buf != NULL) {
		drv_data->rx = transfer->rx_buf;
		drv_data->rx_end = drv_data->rx + transfer->len;
		dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n",
			transfer->rx_buf, drv_data->rx_end);
	} else {
		drv_data->rx = NULL;
	}

	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len_in_bytes = transfer->len;

	/* drv_data->len is in words: 2 bytes per word in 16-bit mode */
	width = chip->width;
	if (width == CFG_SPI_WORDSIZE16) {
		drv_data->len = (transfer->len) >> 1;
	} else {
		drv_data->len = transfer->len;
	}

	/* fall back to the discard handlers for a missing direction */
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;
	/* NOTE(review): falls back to null_writer (not a duplex no-op)
	 * when chip->duplex is unset — confirm this is intended */
	drv_data->duplex = chip->duplex ? chip->duplex : null_writer;
	dev_dbg(&drv_data->pdev->dev,
		"transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n",
		drv_data->write, chip->write, null_writer);

	/* speed and width has been set on per message */
	message->state = RUNNING_STATE;
	dma_config = 0;

	/* restore spi status for each spi transfer */
	if (transfer->speed_hz) {
		write_BAUD(hz_to_spi_baud(transfer->speed_hz));
	} else {
		write_BAUD(chip->baud);
	}
	write_FLAG(chip->flag);

	dev_dbg(&drv_data->pdev->dev,
		"now pumping a transfer: width is %d, len is %d\n",
		width, transfer->len);

	/*
	 * Try to map dma buffer and do a dma transfer if
	 * successful use different way to r/w according to
	 * drv_data->cur_chip->enable_dma
	 */
	if (drv_data->cur_chip->enable_dma && drv_data->len > 6) {
		/* quiesce DMA channel and controller before reconfig */
		write_STAT(BIT_STAT_CLR);
		disable_dma(CH_SPI);
		clear_dma_irqstat(CH_SPI);
		bfin_spi_disable(drv_data);

		/* config dma channel */
		dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n");
		if (width == CFG_SPI_WORDSIZE16) {
			set_dma_x_count(CH_SPI, drv_data->len);
			set_dma_x_modify(CH_SPI, 2);
			dma_width = WDSIZE_16;
		} else {
			set_dma_x_count(CH_SPI, drv_data->len);
			set_dma_x_modify(CH_SPI, 1);
			dma_width = WDSIZE_8;
		}

		/* set transfer width,direction. And enable spi */
		cr = (read_CTRL() & (~BIT_CTL_TIMOD));

		/* dirty hack for autobuffer DMA mode */
		if (drv_data->tx_dma == 0xFFFF) {
			dev_dbg(&drv_data->pdev->dev,
				"doing autobuffer DMA out.\n");

			/* no irq in autobuffer mode */
			dma_config =
			    (DMAFLOW_AUTO | RESTART | dma_width | DI_EN);
			set_dma_config(CH_SPI, dma_config);
			set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx);
			enable_dma(CH_SPI);
			write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
				   (CFG_SPI_ENABLE << 14));
			/* just return here, there can only be one transfer in this mode */
			message->status = 0;
			giveback(drv_data);
			return;
		}

		/* In dma mode, rx or tx must be NULL in one transfer */
		if (drv_data->rx != NULL) {
			/* set transfer mode, and enable SPI */
			dev_dbg(&drv_data->pdev->dev, "doing DMA in.\n");

			/* disable SPI before write to TDBR */
			write_CTRL(cr & ~BIT_CTL_ENABLE);

			/* clear tx reg soformer data is not shifted out */
			write_TDBR(0xFF);

			set_dma_x_count(CH_SPI, drv_data->len);

			/* start dma */
			dma_enable_irq(CH_SPI);
			dma_config = (WNR | RESTART | dma_width | DI_EN);
			set_dma_config(CH_SPI, dma_config);
			set_dma_start_addr(CH_SPI, (unsigned long)drv_data->rx);
			enable_dma(CH_SPI);

			cr |=
			    CFG_SPI_DMAREAD | (width << 8) | (CFG_SPI_ENABLE <<
							      14);
			/* set transfer mode, and enable SPI */
			write_CTRL(cr);
		} else if (drv_data->tx != NULL) {
			dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n");

			/* start dma */
			dma_enable_irq(CH_SPI);
			dma_config = (RESTART | dma_width | DI_EN);
			set_dma_config(CH_SPI, dma_config);
			set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx);
			enable_dma(CH_SPI);

			write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
				   (CFG_SPI_ENABLE << 14));
		}
	} else {
		/* IO mode write then read */
		dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");

		write_STAT(BIT_STAT_CLR);

		if (drv_data->tx != NULL && drv_data->rx != NULL) {
			/* full duplex mode */
			BUG_ON((drv_data->tx_end - drv_data->tx) !=
			       (drv_data->rx_end - drv_data->rx));
			cr = (read_CTRL() & (~BIT_CTL_TIMOD));
			cr |= CFG_SPI_WRITE | (width << 8) |
			    (CFG_SPI_ENABLE << 14);
			dev_dbg(&drv_data->pdev->dev,
				"IO duplex: cr is 0x%x\n", cr);

			write_CTRL(cr);
			SSYNC();

			drv_data->duplex(drv_data);

			/* an unconsumed tx cursor means the PIO loop bailed */
			if (drv_data->tx != drv_data->tx_end)
				tranf_success = 0;
		} else if (drv_data->tx != NULL) {
			/* write only half duplex */
			cr = (read_CTRL() & (~BIT_CTL_TIMOD));
			cr |= CFG_SPI_WRITE | (width << 8) |
			    (CFG_SPI_ENABLE << 14);
			dev_dbg(&drv_data->pdev->dev,
				"IO write: cr is 0x%x\n", cr);

			write_CTRL(cr);
			SSYNC();

			drv_data->write(drv_data);

			if (drv_data->tx != drv_data->tx_end)
				tranf_success = 0;
		} else if (drv_data->rx != NULL) {
			/* read only half duplex */
			cr = (read_CTRL() & (~BIT_CTL_TIMOD));
			cr |= CFG_SPI_READ | (width << 8) |
			    (CFG_SPI_ENABLE << 14);
			dev_dbg(&drv_data->pdev->dev,
				"IO read: cr is 0x%x\n", cr);

			write_CTRL(cr);
			SSYNC();

			drv_data->read(drv_data);
			if (drv_data->rx != drv_data->rx_end)
				tranf_success = 0;
		}

		if (!tranf_success) {
			dev_dbg(&drv_data->pdev->dev,
				"IO write error!\n");
			message->state = ERROR_STATE;
		} else {
			/* Update total byte transfered */
			message->actual_length += drv_data->len;

			/* Move to next transfer of this msg */
			message->state = next_transfer(drv_data);
		}

		/* Schedule next transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);
	}
}
/* pop a msg from queue and kick off real transfer */
static void pump_messages(struct work_struct *work)
{
	struct driver_data *drv_data = container_of(work, struct driver_data, pump_messages);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
		/* pumper kicked off but no work to do */
		drv_data->busy = 0;
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Extract head of queue */
	drv_data->cur_msg = list_entry(drv_data->queue.next,
				       struct spi_message, queue);
	list_del_init(&drv_data->cur_msg->queue);

	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
					    struct spi_transfer, transfer_list);

	/* Setup the SSP using the per chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
	restore_state(drv_data);
	dev_dbg(&drv_data->pdev->dev,
		"got a message to pump, state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
		drv_data->cur_chip->baud, drv_data->cur_chip->flag,
		drv_data->cur_chip->ctl_reg);
	dev_dbg(&drv_data->pdev->dev,
		"the first transfer len is %d\n",
		drv_data->cur_transfer->len);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);

	drv_data->busy = 1;
	spin_unlock_irqrestore(&drv_data->lock, flags);
}
/*
 * got a msg to transfer, queue it in drv_data->queue.
 * And kick off message pumper
 */
static int transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	/* reject new work once the queue has been stopped */
	if (drv_data->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -ESHUTDOWN;
	}

	/* initialise the message's bookkeeping before queueing it */
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	dev_dbg(&spi->dev, "adding an msg in transfer() \n");
	list_add_tail(&msg->queue, &drv_data->queue);

	/* wake the pump only if it is idle and the queue is running */
	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->pump_messages);

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return 0;
}
  807. /* first setup for new devices */
  808. static int setup(struct spi_device *spi)
  809. {
  810. struct bfin5xx_spi_chip *chip_info = NULL;
  811. struct chip_data *chip;
  812. struct driver_data *drv_data = spi_master_get_devdata(spi->master);
  813. u8 spi_flg;
  814. /* Abort device setup if requested features are not supported */
  815. if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
  816. dev_err(&spi->dev, "requested mode not fully supported\n");
  817. return -EINVAL;
  818. }
  819. /* Zero (the default) here means 8 bits */
  820. if (!spi->bits_per_word)
  821. spi->bits_per_word = 8;
  822. if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
  823. return -EINVAL;
  824. /* Only alloc (or use chip_info) on first setup */
  825. chip = spi_get_ctldata(spi);
  826. if (chip == NULL) {
  827. chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
  828. if (!chip)
  829. return -ENOMEM;
  830. chip->enable_dma = 0;
  831. chip_info = spi->controller_data;
  832. }
  833. /* chip_info isn't always needed */
  834. if (chip_info) {
  835. chip->enable_dma = chip_info->enable_dma != 0
  836. && drv_data->master_info->enable_dma;
  837. chip->ctl_reg = chip_info->ctl_reg;
  838. chip->bits_per_word = chip_info->bits_per_word;
  839. chip->cs_change_per_word = chip_info->cs_change_per_word;
  840. chip->cs_chg_udelay = chip_info->cs_chg_udelay;
  841. }
  842. /* translate common spi framework into our register */
  843. if (spi->mode & SPI_CPOL)
  844. chip->ctl_reg |= CPOL;
  845. if (spi->mode & SPI_CPHA)
  846. chip->ctl_reg |= CPHA;
  847. if (spi->mode & SPI_LSB_FIRST)
  848. chip->ctl_reg |= LSBF;
  849. /* we dont support running in slave mode (yet?) */
  850. chip->ctl_reg |= MSTR;
  851. /*
  852. * if any one SPI chip is registered and wants DMA, request the
  853. * DMA channel for it
  854. */
  855. if (chip->enable_dma && !dma_requested) {
  856. /* register dma irq handler */
  857. if (request_dma(CH_SPI, "BF53x_SPI_DMA") < 0) {
  858. dev_dbg(&spi->dev,
  859. "Unable to request BlackFin SPI DMA channel\n");
  860. return -ENODEV;
  861. }
  862. if (set_dma_callback(CH_SPI, (void *)dma_irq_handler, drv_data)
  863. < 0) {
  864. dev_dbg(&spi->dev, "Unable to set dma callback\n");
  865. return -EPERM;
  866. }
  867. dma_disable_irq(CH_SPI);
  868. dma_requested = 1;
  869. }
  870. /*
  871. * Notice: for blackfin, the speed_hz is the value of register
  872. * SPI_BAUD, not the real baudrate
  873. */
  874. chip->baud = hz_to_spi_baud(spi->max_speed_hz);
  875. spi_flg = ~(1 << (spi->chip_select));
  876. chip->flag = ((u16) spi_flg << 8) | (1 << (spi->chip_select));
  877. chip->chip_select_num = spi->chip_select;
  878. switch (chip->bits_per_word) {
  879. case 8:
  880. chip->n_bytes = 1;
  881. chip->width = CFG_SPI_WORDSIZE8;
  882. chip->read = chip->cs_change_per_word ?
  883. u8_cs_chg_reader : u8_reader;
  884. chip->write = chip->cs_change_per_word ?
  885. u8_cs_chg_writer : u8_writer;
  886. chip->duplex = chip->cs_change_per_word ?
  887. u8_cs_chg_duplex : u8_duplex;
  888. break;
  889. case 16:
  890. chip->n_bytes = 2;
  891. chip->width = CFG_SPI_WORDSIZE16;
  892. chip->read = chip->cs_change_per_word ?
  893. u16_cs_chg_reader : u16_reader;
  894. chip->write = chip->cs_change_per_word ?
  895. u16_cs_chg_writer : u16_writer;
  896. chip->duplex = chip->cs_change_per_word ?
  897. u16_cs_chg_duplex : u16_duplex;
  898. break;
  899. default:
  900. dev_err(&spi->dev, "%d bits_per_word is not supported\n",
  901. chip->bits_per_word);
  902. kfree(chip);
  903. return -ENODEV;
  904. }
  905. dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n",
  906. spi->modalias, chip->width, chip->enable_dma);
  907. dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n",
  908. chip->ctl_reg, chip->flag);
  909. spi_set_ctldata(spi, chip);
  910. return 0;
  911. }
  912. /*
  913. * callback for spi framework.
  914. * clean driver specific data
  915. */
  916. static void cleanup(struct spi_device *spi)
  917. {
  918. struct chip_data *chip = spi_get_ctldata(spi);
  919. kfree(chip);
  920. }
  921. static inline int init_queue(struct driver_data *drv_data)
  922. {
  923. INIT_LIST_HEAD(&drv_data->queue);
  924. spin_lock_init(&drv_data->lock);
  925. drv_data->run = QUEUE_STOPPED;
  926. drv_data->busy = 0;
  927. /* init transfer tasklet */
  928. tasklet_init(&drv_data->pump_transfers,
  929. pump_transfers, (unsigned long)drv_data);
  930. /* init messages workqueue */
  931. INIT_WORK(&drv_data->pump_messages, pump_messages);
  932. drv_data->workqueue =
  933. create_singlethread_workqueue(drv_data->master->dev.parent->bus_id);
  934. if (drv_data->workqueue == NULL)
  935. return -EBUSY;
  936. return 0;
  937. }
  938. static inline int start_queue(struct driver_data *drv_data)
  939. {
  940. unsigned long flags;
  941. spin_lock_irqsave(&drv_data->lock, flags);
  942. if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
  943. spin_unlock_irqrestore(&drv_data->lock, flags);
  944. return -EBUSY;
  945. }
  946. drv_data->run = QUEUE_RUNNING;
  947. drv_data->cur_msg = NULL;
  948. drv_data->cur_transfer = NULL;
  949. drv_data->cur_chip = NULL;
  950. spin_unlock_irqrestore(&drv_data->lock, flags);
  951. queue_work(drv_data->workqueue, &drv_data->pump_messages);
  952. return 0;
  953. }
  954. static inline int stop_queue(struct driver_data *drv_data)
  955. {
  956. unsigned long flags;
  957. unsigned limit = 500;
  958. int status = 0;
  959. spin_lock_irqsave(&drv_data->lock, flags);
  960. /*
  961. * This is a bit lame, but is optimized for the common execution path.
  962. * A wait_queue on the drv_data->busy could be used, but then the common
  963. * execution path (pump_messages) would be required to call wake_up or
  964. * friends on every SPI message. Do this instead
  965. */
  966. drv_data->run = QUEUE_STOPPED;
  967. while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
  968. spin_unlock_irqrestore(&drv_data->lock, flags);
  969. msleep(10);
  970. spin_lock_irqsave(&drv_data->lock, flags);
  971. }
  972. if (!list_empty(&drv_data->queue) || drv_data->busy)
  973. status = -EBUSY;
  974. spin_unlock_irqrestore(&drv_data->lock, flags);
  975. return status;
  976. }
  977. static inline int destroy_queue(struct driver_data *drv_data)
  978. {
  979. int status;
  980. status = stop_queue(drv_data);
  981. if (status != 0)
  982. return status;
  983. destroy_workqueue(drv_data->workqueue);
  984. return 0;
  985. }
  986. static int __init bfin5xx_spi_probe(struct platform_device *pdev)
  987. {
  988. struct device *dev = &pdev->dev;
  989. struct bfin5xx_spi_master *platform_info;
  990. struct spi_master *master;
  991. struct driver_data *drv_data = 0;
  992. int status = 0;
  993. platform_info = dev->platform_data;
  994. /* Allocate master with space for drv_data */
  995. master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
  996. if (!master) {
  997. dev_err(&pdev->dev, "can not alloc spi_master\n");
  998. return -ENOMEM;
  999. }
  1000. drv_data = spi_master_get_devdata(master);
  1001. drv_data->master = master;
  1002. drv_data->master_info = platform_info;
  1003. drv_data->pdev = pdev;
  1004. master->bus_num = pdev->id;
  1005. master->num_chipselect = platform_info->num_chipselect;
  1006. master->cleanup = cleanup;
  1007. master->setup = setup;
  1008. master->transfer = transfer;
  1009. /* Initial and start queue */
  1010. status = init_queue(drv_data);
  1011. if (status != 0) {
  1012. dev_err(&pdev->dev, "problem initializing queue\n");
  1013. goto out_error_queue_alloc;
  1014. }
  1015. status = start_queue(drv_data);
  1016. if (status != 0) {
  1017. dev_err(&pdev->dev, "problem starting queue\n");
  1018. goto out_error_queue_alloc;
  1019. }
  1020. /* Register with the SPI framework */
  1021. platform_set_drvdata(pdev, drv_data);
  1022. status = spi_register_master(master);
  1023. if (status != 0) {
  1024. dev_err(&pdev->dev, "problem registering spi master\n");
  1025. goto out_error_queue_alloc;
  1026. }
  1027. dev_dbg(&pdev->dev, "controller probe successfully\n");
  1028. return status;
  1029. out_error_queue_alloc:
  1030. destroy_queue(drv_data);
  1031. spi_master_put(master);
  1032. return status;
  1033. }
  1034. /* stop hardware and remove the driver */
  1035. static int __devexit bfin5xx_spi_remove(struct platform_device *pdev)
  1036. {
  1037. struct driver_data *drv_data = platform_get_drvdata(pdev);
  1038. int status = 0;
  1039. if (!drv_data)
  1040. return 0;
  1041. /* Remove the queue */
  1042. status = destroy_queue(drv_data);
  1043. if (status != 0)
  1044. return status;
  1045. /* Disable the SSP at the peripheral and SOC level */
  1046. bfin_spi_disable(drv_data);
  1047. /* Release DMA */
  1048. if (drv_data->master_info->enable_dma) {
  1049. if (dma_channel_active(CH_SPI))
  1050. free_dma(CH_SPI);
  1051. }
  1052. /* Disconnect from the SPI framework */
  1053. spi_unregister_master(drv_data->master);
  1054. /* Prevent double remove */
  1055. platform_set_drvdata(pdev, NULL);
  1056. return 0;
  1057. }
  1058. #ifdef CONFIG_PM
  1059. static int bfin5xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
  1060. {
  1061. struct driver_data *drv_data = platform_get_drvdata(pdev);
  1062. int status = 0;
  1063. status = stop_queue(drv_data);
  1064. if (status != 0)
  1065. return status;
  1066. /* stop hardware */
  1067. bfin_spi_disable(drv_data);
  1068. return 0;
  1069. }
  1070. static int bfin5xx_spi_resume(struct platform_device *pdev)
  1071. {
  1072. struct driver_data *drv_data = platform_get_drvdata(pdev);
  1073. int status = 0;
  1074. /* Enable the SPI interface */
  1075. bfin_spi_enable(drv_data);
  1076. /* Start the queue running */
  1077. status = start_queue(drv_data);
  1078. if (status != 0) {
  1079. dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
  1080. return status;
  1081. }
  1082. return 0;
  1083. }
  1084. #else
  1085. #define bfin5xx_spi_suspend NULL
  1086. #define bfin5xx_spi_resume NULL
  1087. #endif /* CONFIG_PM */
  1088. MODULE_ALIAS("bfin-spi-master"); /* for platform bus hotplug */
  1089. static struct platform_driver bfin5xx_spi_driver = {
  1090. .driver = {
  1091. .name = "bfin-spi-master",
  1092. .owner = THIS_MODULE,
  1093. },
  1094. .suspend = bfin5xx_spi_suspend,
  1095. .resume = bfin5xx_spi_resume,
  1096. .remove = __devexit_p(bfin5xx_spi_remove),
  1097. };
  1098. static int __init bfin5xx_spi_init(void)
  1099. {
  1100. return platform_driver_probe(&bfin5xx_spi_driver, bfin5xx_spi_probe);
  1101. }
  1102. module_init(bfin5xx_spi_init);
  1103. static void __exit bfin5xx_spi_exit(void)
  1104. {
  1105. platform_driver_unregister(&bfin5xx_spi_driver);
  1106. }
  1107. module_exit(bfin5xx_spi_exit);