spi_bfin5xx.c

  1. /*
  2. * Blackfin On-Chip SPI Driver
  3. *
  4. * Copyright 2004-2007 Analog Devices Inc.
  5. *
  6. * Enter bugs at http://blackfin.uclinux.org/
  7. *
  8. * Licensed under the GPL-2 or later.
  9. */
  10. #include <linux/init.h>
  11. #include <linux/module.h>
  12. #include <linux/delay.h>
  13. #include <linux/device.h>
  14. #include <linux/slab.h>
  15. #include <linux/io.h>
  16. #include <linux/ioport.h>
  17. #include <linux/irq.h>
  18. #include <linux/errno.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/spi/spi.h>
  23. #include <linux/workqueue.h>
  24. #include <asm/dma.h>
  25. #include <asm/portmux.h>
  26. #include <asm/bfin5xx_spi.h>
  27. #include <asm/cacheflush.h>
  28. #define DRV_NAME "bfin-spi"
  29. #define DRV_AUTHOR "Bryan Wu, Luke Yang"
  30. #define DRV_DESC "Blackfin on-chip SPI Controller Driver"
  31. #define DRV_VERSION "1.0"
  32. MODULE_AUTHOR(DRV_AUTHOR);
  33. MODULE_DESCRIPTION(DRV_DESC);
  34. MODULE_LICENSE("GPL");
  35. #define START_STATE ((void *)0)
  36. #define RUNNING_STATE ((void *)1)
  37. #define DONE_STATE ((void *)2)
  38. #define ERROR_STATE ((void *)-1)
  39. struct driver_data;
  40. struct transfer_ops {
  41. void (*write) (struct driver_data *);
  42. void (*read) (struct driver_data *);
  43. void (*duplex) (struct driver_data *);
  44. };
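/*
 * Queueing model used by this driver (as implemented below): bfin_spi_transfer()
 * appends each spi_message to drv_data->queue and kicks the pump_messages
 * workqueue. The work item dequeues one message in process context and
 * schedules the pump_transfers tasklet, which walks the message's transfer
 * list and picks a polled-PIO, interrupt-PIO, or DMA path per transfer. The
 * DMA and PIO IRQ handlers re-schedule the tasklet when a transfer completes.
 */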
  45. struct driver_data {
  46. /* Driver model hookup */
  47. struct platform_device *pdev;
  48. /* SPI framework hookup */
  49. struct spi_master *master;
  50. /* Regs base of SPI controller */
  51. void __iomem *regs_base;
  52. /* Pin request list */
  53. u16 *pin_req;
  54. /* BFIN hookup */
  55. struct bfin5xx_spi_master *master_info;
  56. /* Driver message queue */
  57. struct workqueue_struct *workqueue;
  58. struct work_struct pump_messages;
  59. spinlock_t lock;
  60. struct list_head queue;
  61. int busy;
  62. bool running;
  63. /* Message Transfer pump */
  64. struct tasklet_struct pump_transfers;
  65. /* Current message transfer state info */
  66. struct spi_message *cur_msg;
  67. struct spi_transfer *cur_transfer;
  68. struct chip_data *cur_chip;
  69. size_t len_in_bytes;
  70. size_t len;
  71. void *tx;
  72. void *tx_end;
  73. void *rx;
  74. void *rx_end;
  75. /* DMA stuffs */
  76. int dma_channel;
  77. int dma_mapped;
  78. int dma_requested;
  79. dma_addr_t rx_dma;
  80. dma_addr_t tx_dma;
  81. int irq_requested;
  82. int spi_irq;
  83. size_t rx_map_len;
  84. size_t tx_map_len;
  85. u8 n_bytes;
  86. int cs_change;
  87. const struct transfer_ops *ops;
  88. };
  89. struct chip_data {
  90. u16 ctl_reg;
  91. u16 baud;
  92. u16 flag;
  93. u8 chip_select_num;
  94. u8 n_bytes;
  95. u8 width; /* 0 or 1 */
  96. u8 enable_dma;
  97. u8 bits_per_word; /* 8 or 16 */
  98. u16 cs_chg_udelay; /* Some devices require > 255usec delay */
  99. u32 cs_gpio;
  100. u16 idle_tx_val;
  101. u8 pio_interrupt; /* use spi data irq */
  102. const struct transfer_ops *ops;
  103. };
  104. #define DEFINE_SPI_REG(reg, off) \
  105. static inline u16 read_##reg(struct driver_data *drv_data) \
  106. { return bfin_read16(drv_data->regs_base + off); } \
  107. static inline void write_##reg(struct driver_data *drv_data, u16 v) \
  108. { bfin_write16(drv_data->regs_base + off, v); }
  109. DEFINE_SPI_REG(CTRL, 0x00)
  110. DEFINE_SPI_REG(FLAG, 0x04)
  111. DEFINE_SPI_REG(STAT, 0x08)
  112. DEFINE_SPI_REG(TDBR, 0x0C)
  113. DEFINE_SPI_REG(RDBR, 0x10)
  114. DEFINE_SPI_REG(BAUD, 0x14)
  115. DEFINE_SPI_REG(SHAW, 0x18)
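/*
 * The DEFINE_SPI_REG() uses above generate 16-bit MMIO accessors
 * (read_CTRL()/write_CTRL(), read_STAT()/write_STAT(), ...) at the given
 * offsets from regs_base, which is ioremap()ed from the platform MEM
 * resource in bfin_spi_probe().
 */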
  116. static void bfin_spi_enable(struct driver_data *drv_data)
  117. {
  118. u16 cr;
  119. cr = read_CTRL(drv_data);
  120. write_CTRL(drv_data, (cr | BIT_CTL_ENABLE));
  121. }
  122. static void bfin_spi_disable(struct driver_data *drv_data)
  123. {
  124. u16 cr;
  125. cr = read_CTRL(drv_data);
  126. write_CTRL(drv_data, (cr & (~BIT_CTL_ENABLE)));
  127. }
  128. /* Calculate the SPI_BAUD register value based on input HZ */
  129. static u16 hz_to_spi_baud(u32 speed_hz)
  130. {
  131. u_long sclk = get_sclk();
  132. u16 spi_baud = (sclk / (2 * speed_hz));
  133. if ((sclk % (2 * speed_hz)) > 0)
  134. spi_baud++;
  135. if (spi_baud < MIN_SPI_BAUD_VAL)
  136. spi_baud = MIN_SPI_BAUD_VAL;
  137. return spi_baud;
  138. }
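/*
 * Worked example (illustrative numbers): with SCLK = 100 MHz and
 * speed_hz = 12 MHz, 100000000 / (2 * 12000000) = 4 with a remainder,
 * so spi_baud is rounded up to 5 and the resulting clock is
 * SCLK / (2 * 5) = 10 MHz, i.e. never faster than requested.
 */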
  139. static int bfin_spi_flush(struct driver_data *drv_data)
  140. {
  141. unsigned long limit = loops_per_jiffy << 1;
  142. /* wait for stop and clear stat */
  143. while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && --limit)
  144. cpu_relax();
  145. write_STAT(drv_data, BIT_STAT_CLR);
  146. return limit;
  147. }
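/*
 * The chip-select helpers below assume the Blackfin SPI_FLG register layout:
 * the low byte holds the FLS slave-select enable bits and the high byte the
 * FLG output values (active low). chip->flag stores the FLG bit, so
 * cs_enable()/cs_disable() shift it down by 8 to reach the matching FLS bit,
 * while cs_active()/cs_deactive() drive the FLG bit itself. Chip select 0 is
 * not a hardware slave select and is driven through a GPIO instead.
 */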
  148. /* Chip select operation functions for cs_change flag */
  149. static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *chip)
  150. {
  151. if (likely(chip->chip_select_num)) {
  152. u16 flag = read_FLAG(drv_data);
  153. flag &= ~chip->flag;
  154. write_FLAG(drv_data, flag);
  155. } else {
  156. gpio_set_value(chip->cs_gpio, 0);
  157. }
  158. }
  159. static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data *chip)
  160. {
  161. if (likely(chip->chip_select_num)) {
  162. u16 flag = read_FLAG(drv_data);
  163. flag |= chip->flag;
  164. write_FLAG(drv_data, flag);
  165. } else {
  166. gpio_set_value(chip->cs_gpio, 1);
  167. }
  168. /* Move delay here for consistency */
  169. if (chip->cs_chg_udelay)
  170. udelay(chip->cs_chg_udelay);
  171. }
  172. /* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */
  173. static inline void bfin_spi_cs_enable(struct driver_data *drv_data, struct chip_data *chip)
  174. {
  175. u16 flag = read_FLAG(drv_data);
  176. flag |= (chip->flag >> 8);
  177. write_FLAG(drv_data, flag);
  178. }
  179. static inline void bfin_spi_cs_disable(struct driver_data *drv_data, struct chip_data *chip)
  180. {
  181. u16 flag = read_FLAG(drv_data);
  182. flag &= ~(chip->flag >> 8);
  183. write_FLAG(drv_data, flag);
  184. }
  185. /* stop the controller and re-configure the current chip */
  186. static void bfin_spi_restore_state(struct driver_data *drv_data)
  187. {
  188. struct chip_data *chip = drv_data->cur_chip;
  189. /* Clear status and disable clock */
  190. write_STAT(drv_data, BIT_STAT_CLR);
  191. bfin_spi_disable(drv_data);
  192. dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");
  193. /* Load the registers */
  194. write_CTRL(drv_data, chip->ctl_reg);
  195. write_BAUD(drv_data, chip->baud);
  196. bfin_spi_enable(drv_data);
  197. bfin_spi_cs_active(drv_data, chip);
  198. }
  199. /* used to kick off transfer in rx mode and read unwanted RX data */
  200. static inline void bfin_spi_dummy_read(struct driver_data *drv_data)
  201. {
  202. (void) read_RDBR(drv_data);
  203. }
  204. static void bfin_spi_u8_writer(struct driver_data *drv_data)
  205. {
  206. /* clear RXS (we check for RXS inside the loop) */
  207. bfin_spi_dummy_read(drv_data);
  208. while (drv_data->tx < drv_data->tx_end) {
  209. write_TDBR(drv_data, (*(u8 *) (drv_data->tx++)));
  210. /* wait until transfer finished.
  211. checking SPIF or TXS may not guarantee transfer completion */
  212. while (!(read_STAT(drv_data) & BIT_STAT_RXS))
  213. cpu_relax();
  214. /* discard RX data and clear RXS */
  215. bfin_spi_dummy_read(drv_data);
  216. }
  217. }
  218. static void bfin_spi_u8_reader(struct driver_data *drv_data)
  219. {
  220. u16 tx_val = drv_data->cur_chip->idle_tx_val;
  221. /* discard old RX data and clear RXS */
  222. bfin_spi_dummy_read(drv_data);
  223. while (drv_data->rx < drv_data->rx_end) {
  224. write_TDBR(drv_data, tx_val);
  225. while (!(read_STAT(drv_data) & BIT_STAT_RXS))
  226. cpu_relax();
  227. *(u8 *) (drv_data->rx++) = read_RDBR(drv_data);
  228. }
  229. }
  230. static void bfin_spi_u8_duplex(struct driver_data *drv_data)
  231. {
  232. /* discard old RX data and clear RXS */
  233. bfin_spi_dummy_read(drv_data);
  234. while (drv_data->rx < drv_data->rx_end) {
  235. write_TDBR(drv_data, (*(u8 *) (drv_data->tx++)));
  236. while (!(read_STAT(drv_data) & BIT_STAT_RXS))
  237. cpu_relax();
  238. *(u8 *) (drv_data->rx++) = read_RDBR(drv_data);
  239. }
  240. }
  241. static const struct transfer_ops bfin_transfer_ops_u8 = {
  242. .write = bfin_spi_u8_writer,
  243. .read = bfin_spi_u8_reader,
  244. .duplex = bfin_spi_u8_duplex,
  245. };
  246. static void bfin_spi_u16_writer(struct driver_data *drv_data)
  247. {
  248. /* clear RXS (we check for RXS inside the loop) */
  249. bfin_spi_dummy_read(drv_data);
  250. while (drv_data->tx < drv_data->tx_end) {
  251. write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
  252. drv_data->tx += 2;
  253. /* wait until transfer finished.
  254. checking SPIF or TXS may not guarantee transfer completion */
  255. while (!(read_STAT(drv_data) & BIT_STAT_RXS))
  256. cpu_relax();
  257. /* discard RX data and clear RXS */
  258. bfin_spi_dummy_read(drv_data);
  259. }
  260. }
  261. static void bfin_spi_u16_reader(struct driver_data *drv_data)
  262. {
  263. u16 tx_val = drv_data->cur_chip->idle_tx_val;
  264. /* discard old RX data and clear RXS */
  265. bfin_spi_dummy_read(drv_data);
  266. while (drv_data->rx < drv_data->rx_end) {
  267. write_TDBR(drv_data, tx_val);
  268. while (!(read_STAT(drv_data) & BIT_STAT_RXS))
  269. cpu_relax();
  270. *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
  271. drv_data->rx += 2;
  272. }
  273. }
  274. static void bfin_spi_u16_duplex(struct driver_data *drv_data)
  275. {
  276. /* discard old RX data and clear RXS */
  277. bfin_spi_dummy_read(drv_data);
  278. while (drv_data->rx < drv_data->rx_end) {
  279. write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
  280. drv_data->tx += 2;
  281. while (!(read_STAT(drv_data) & BIT_STAT_RXS))
  282. cpu_relax();
  283. *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
  284. drv_data->rx += 2;
  285. }
  286. }
  287. static const struct transfer_ops bfin_transfer_ops_u16 = {
  288. .write = bfin_spi_u16_writer,
  289. .read = bfin_spi_u16_reader,
  290. .duplex = bfin_spi_u16_duplex,
  291. };
  292. /* test if there are more transfers to be done */
  293. static void *bfin_spi_next_transfer(struct driver_data *drv_data)
  294. {
  295. struct spi_message *msg = drv_data->cur_msg;
  296. struct spi_transfer *trans = drv_data->cur_transfer;
  297. /* Move to next transfer */
  298. if (trans->transfer_list.next != &msg->transfers) {
  299. drv_data->cur_transfer =
  300. list_entry(trans->transfer_list.next,
  301. struct spi_transfer, transfer_list);
  302. return RUNNING_STATE;
  303. } else
  304. return DONE_STATE;
  305. }
  306. /*
  307. * caller already set message->status;
  308. * dma and pio irqs are blocked; give the finished message back
  309. */
  310. static void bfin_spi_giveback(struct driver_data *drv_data)
  311. {
  312. struct chip_data *chip = drv_data->cur_chip;
  313. struct spi_transfer *last_transfer;
  314. unsigned long flags;
  315. struct spi_message *msg;
  316. spin_lock_irqsave(&drv_data->lock, flags);
  317. msg = drv_data->cur_msg;
  318. drv_data->cur_msg = NULL;
  319. drv_data->cur_transfer = NULL;
  320. drv_data->cur_chip = NULL;
  321. queue_work(drv_data->workqueue, &drv_data->pump_messages);
  322. spin_unlock_irqrestore(&drv_data->lock, flags);
  323. last_transfer = list_entry(msg->transfers.prev,
  324. struct spi_transfer, transfer_list);
  325. msg->state = NULL;
  326. if (!drv_data->cs_change)
  327. bfin_spi_cs_deactive(drv_data, chip);
  328. /* Don't stop SPI in autobuffer mode */
  329. if (drv_data->tx_dma != 0xFFFF)
  330. bfin_spi_disable(drv_data);
  331. if (msg->complete)
  332. msg->complete(msg->context);
  333. }
  334. /* spi data irq handler */
  335. static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
  336. {
  337. struct driver_data *drv_data = dev_id;
  338. struct chip_data *chip = drv_data->cur_chip;
  339. struct spi_message *msg = drv_data->cur_msg;
  340. int n_bytes = drv_data->n_bytes;
  341. /* wait until transfer finished. */
  342. while (!(read_STAT(drv_data) & BIT_STAT_RXS))
  343. cpu_relax();
  344. if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) ||
  345. (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) {
  346. /* last read */
  347. if (drv_data->rx) {
  348. dev_dbg(&drv_data->pdev->dev, "last read\n");
  349. if (n_bytes == 2)
  350. *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
  351. else if (n_bytes == 1)
  352. *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
  353. drv_data->rx += n_bytes;
  354. }
  355. msg->actual_length += drv_data->len_in_bytes;
  356. if (drv_data->cs_change)
  357. bfin_spi_cs_deactive(drv_data, chip);
  358. /* Move to next transfer */
  359. msg->state = bfin_spi_next_transfer(drv_data);
  360. disable_irq(drv_data->spi_irq);
  361. /* Schedule transfer tasklet */
  362. tasklet_schedule(&drv_data->pump_transfers);
  363. return IRQ_HANDLED;
  364. }
  365. if (drv_data->rx && drv_data->tx) {
  366. /* duplex */
  367. dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n");
  368. if (drv_data->n_bytes == 2) {
  369. *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
  370. write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
  371. } else if (drv_data->n_bytes == 1) {
  372. *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
  373. write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
  374. }
  375. } else if (drv_data->rx) {
  376. /* read */
  377. dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n");
  378. if (drv_data->n_bytes == 2)
  379. *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
  380. else if (drv_data->n_bytes == 1)
  381. *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
  382. write_TDBR(drv_data, chip->idle_tx_val);
  383. } else if (drv_data->tx) {
  384. /* write */
  385. dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n");
  386. bfin_spi_dummy_read(drv_data);
  387. if (drv_data->n_bytes == 2)
  388. write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
  389. else if (drv_data->n_bytes == 1)
  390. write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
  391. }
  392. if (drv_data->tx)
  393. drv_data->tx += n_bytes;
  394. if (drv_data->rx)
  395. drv_data->rx += n_bytes;
  396. return IRQ_HANDLED;
  397. }
  398. static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
  399. {
  400. struct driver_data *drv_data = dev_id;
  401. struct chip_data *chip = drv_data->cur_chip;
  402. struct spi_message *msg = drv_data->cur_msg;
  403. unsigned long timeout;
  404. unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel);
  405. u16 spistat = read_STAT(drv_data);
  406. dev_dbg(&drv_data->pdev->dev,
  407. "in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
  408. dmastat, spistat);
  409. clear_dma_irqstat(drv_data->dma_channel);
  410. /*
  411. * wait for the last transaction shifted out. HRM states:
  412. * at this point there may still be data in the SPI DMA FIFO waiting
  413. * to be transmitted ... software needs to poll TXS in the SPI_STAT
  414. * register until it goes low for 2 successive reads
  415. */
  416. if (drv_data->tx != NULL) {
  417. while ((read_STAT(drv_data) & TXS) ||
  418. (read_STAT(drv_data) & TXS))
  419. cpu_relax();
  420. }
  421. dev_dbg(&drv_data->pdev->dev,
  422. "in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
  423. dmastat, read_STAT(drv_data));
  424. timeout = jiffies + HZ;
  425. while (!(read_STAT(drv_data) & SPIF))
  426. if (!time_before(jiffies, timeout)) {
  427. dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF\n");
  428. break;
  429. } else
  430. cpu_relax();
  431. if ((dmastat & DMA_ERR) && (spistat & RBSY)) {
  432. msg->state = ERROR_STATE;
  433. dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n");
  434. } else {
  435. msg->actual_length += drv_data->len_in_bytes;
  436. if (drv_data->cs_change)
  437. bfin_spi_cs_deactive(drv_data, chip);
  438. /* Move to next transfer */
  439. msg->state = bfin_spi_next_transfer(drv_data);
  440. }
  441. /* Schedule transfer tasklet */
  442. tasklet_schedule(&drv_data->pump_transfers);
  443. /* disable the dma channel irq before the next transfer */
  444. dev_dbg(&drv_data->pdev->dev,
  445. "disable dma channel irq%d\n",
  446. drv_data->dma_channel);
  447. dma_disable_irq(drv_data->dma_channel);
  448. return IRQ_HANDLED;
  449. }
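/*
 * Core transfer state machine, run from the pump_transfers tasklet.
 * It finishes or aborts the current message, applies any inter-transfer
 * delay, programs word size and baud rate, and then hands the transfer to
 * one of three paths: peripheral DMA (only when the chip enables it, the
 * transfer is half duplex, and len > 6 words), PIO driven by the SPI data
 * interrupt, or polled PIO via the transfer_ops callbacks.
 */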
  450. static void bfin_spi_pump_transfers(unsigned long data)
  451. {
  452. struct driver_data *drv_data = (struct driver_data *)data;
  453. struct spi_message *message = NULL;
  454. struct spi_transfer *transfer = NULL;
  455. struct spi_transfer *previous = NULL;
  456. struct chip_data *chip = NULL;
  457. u8 width;
  458. u16 cr, dma_width, dma_config;
  459. u32 tranf_success = 1;
  460. u8 full_duplex = 0;
  461. /* Get current state information */
  462. message = drv_data->cur_msg;
  463. transfer = drv_data->cur_transfer;
  464. chip = drv_data->cur_chip;
  465. /*
  466. * if the msg is in error or done state, report it back using the complete() callback
  467. */
  468. /* Handle abort */
  469. if (message->state == ERROR_STATE) {
  470. dev_dbg(&drv_data->pdev->dev, "transfer: we've hit an error\n");
  471. message->status = -EIO;
  472. bfin_spi_giveback(drv_data);
  473. return;
  474. }
  475. /* Handle end of message */
  476. if (message->state == DONE_STATE) {
  477. dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n");
  478. message->status = 0;
  479. bfin_spi_giveback(drv_data);
  480. return;
  481. }
  482. /* Delay if requested at end of transfer */
  483. if (message->state == RUNNING_STATE) {
  484. dev_dbg(&drv_data->pdev->dev, "transfer: still running ...\n");
  485. previous = list_entry(transfer->transfer_list.prev,
  486. struct spi_transfer, transfer_list);
  487. if (previous->delay_usecs)
  488. udelay(previous->delay_usecs);
  489. }
  490. /* Flush any existing transfers that may be sitting in the hardware */
  491. if (bfin_spi_flush(drv_data) == 0) {
  492. dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
  493. message->status = -EIO;
  494. bfin_spi_giveback(drv_data);
  495. return;
  496. }
  497. if (transfer->len == 0) {
  498. /* Move to next transfer of this msg */
  499. message->state = bfin_spi_next_transfer(drv_data);
  500. /* Schedule next transfer tasklet */
  501. tasklet_schedule(&drv_data->pump_transfers);
  return;
  502. }
  503. if (transfer->tx_buf != NULL) {
  504. drv_data->tx = (void *)transfer->tx_buf;
  505. drv_data->tx_end = drv_data->tx + transfer->len;
  506. dev_dbg(&drv_data->pdev->dev, "tx_buf is %p, tx_end is %p\n",
  507. transfer->tx_buf, drv_data->tx_end);
  508. } else {
  509. drv_data->tx = NULL;
  510. }
  511. if (transfer->rx_buf != NULL) {
  512. full_duplex = transfer->tx_buf != NULL;
  513. drv_data->rx = transfer->rx_buf;
  514. drv_data->rx_end = drv_data->rx + transfer->len;
  515. dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n",
  516. transfer->rx_buf, drv_data->rx_end);
  517. } else {
  518. drv_data->rx = NULL;
  519. }
  520. drv_data->rx_dma = transfer->rx_dma;
  521. drv_data->tx_dma = transfer->tx_dma;
  522. drv_data->len_in_bytes = transfer->len;
  523. drv_data->cs_change = transfer->cs_change;
  524. /* Bits per word setup */
  525. switch (transfer->bits_per_word) {
  526. case 8:
  527. drv_data->n_bytes = 1;
  528. width = CFG_SPI_WORDSIZE8;
  529. drv_data->ops = &bfin_transfer_ops_u8;
  530. break;
  531. case 16:
  532. drv_data->n_bytes = 2;
  533. width = CFG_SPI_WORDSIZE16;
  534. drv_data->ops = &bfin_transfer_ops_u16;
  535. break;
  536. default:
  537. /* No change, the same as default setting */
  538. transfer->bits_per_word = chip->bits_per_word;
  539. drv_data->n_bytes = chip->n_bytes;
  540. width = chip->width;
  541. drv_data->ops = chip->ops;
  542. break;
  543. }
  544. cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD));
  545. cr |= (width << 8);
  546. write_CTRL(drv_data, cr);
  547. if (width == CFG_SPI_WORDSIZE16) {
  548. drv_data->len = (transfer->len) >> 1;
  549. } else {
  550. drv_data->len = transfer->len;
  551. }
  552. dev_dbg(&drv_data->pdev->dev,
  553. "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n",
  554. drv_data->ops, chip->ops, &bfin_transfer_ops_u8);
  555. message->state = RUNNING_STATE;
  556. dma_config = 0;
  557. /* Speed setup (surely valid because already checked) */
  558. if (transfer->speed_hz)
  559. write_BAUD(drv_data, hz_to_spi_baud(transfer->speed_hz));
  560. else
  561. write_BAUD(drv_data, chip->baud);
  562. write_STAT(drv_data, BIT_STAT_CLR);
  563. cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD));
  564. if (drv_data->cs_change)
  565. bfin_spi_cs_active(drv_data, chip);
  566. dev_dbg(&drv_data->pdev->dev,
  567. "now pumping a transfer: width is %d, len is %d\n",
  568. width, transfer->len);
  569. /*
  570. * Try to map the DMA buffer and do a DMA transfer. If successful, use
  571. * different ways to r/w according to the enable_dma setting and whether
  572. * we are doing a full duplex transfer (since the hardware does
  573. * not support full duplex DMA transfers).
  574. */
  575. if (!full_duplex && drv_data->cur_chip->enable_dma
  576. && drv_data->len > 6) {
  577. unsigned long dma_start_addr, flags;
  578. disable_dma(drv_data->dma_channel);
  579. clear_dma_irqstat(drv_data->dma_channel);
  580. /* config dma channel */
  581. dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n");
  582. set_dma_x_count(drv_data->dma_channel, drv_data->len);
  583. if (width == CFG_SPI_WORDSIZE16) {
  584. set_dma_x_modify(drv_data->dma_channel, 2);
  585. dma_width = WDSIZE_16;
  586. } else {
  587. set_dma_x_modify(drv_data->dma_channel, 1);
  588. dma_width = WDSIZE_8;
  589. }
  590. /* poll for SPI completion before start */
  591. while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
  592. cpu_relax();
  593. /* dirty hack for autobuffer DMA mode */
  594. if (drv_data->tx_dma == 0xFFFF) {
  595. dev_dbg(&drv_data->pdev->dev,
  596. "doing autobuffer DMA out.\n");
  597. /* no irq in autobuffer mode */
  598. dma_config =
  599. (DMAFLOW_AUTO | RESTART | dma_width | DI_EN);
  600. set_dma_config(drv_data->dma_channel, dma_config);
  601. set_dma_start_addr(drv_data->dma_channel,
  602. (unsigned long)drv_data->tx);
  603. enable_dma(drv_data->dma_channel);
  604. /* start SPI transfer */
  605. write_CTRL(drv_data, cr | BIT_CTL_TIMOD_DMA_TX);
  606. /* just return here, there can only be one transfer
  607. * in this mode
  608. */
  609. message->status = 0;
  610. bfin_spi_giveback(drv_data);
  611. return;
  612. }
  613. /* In dma mode, rx or tx must be NULL in one transfer */
  614. dma_config = (RESTART | dma_width | DI_EN);
  615. if (drv_data->rx != NULL) {
  616. /* set transfer mode, and enable SPI */
  617. dev_dbg(&drv_data->pdev->dev, "doing DMA in to %p (size %zx)\n",
  618. drv_data->rx, drv_data->len_in_bytes);
  619. /* invalidate caches, if needed */
  620. if (bfin_addr_dcacheable((unsigned long) drv_data->rx))
  621. invalidate_dcache_range((unsigned long) drv_data->rx,
  622. (unsigned long) (drv_data->rx +
  623. drv_data->len_in_bytes));
  624. dma_config |= WNR;
  625. dma_start_addr = (unsigned long)drv_data->rx;
  626. cr |= BIT_CTL_TIMOD_DMA_RX | BIT_CTL_SENDOPT;
  627. } else if (drv_data->tx != NULL) {
  628. dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n");
  629. /* flush caches, if needed */
  630. if (bfin_addr_dcacheable((unsigned long) drv_data->tx))
  631. flush_dcache_range((unsigned long) drv_data->tx,
  632. (unsigned long) (drv_data->tx +
  633. drv_data->len_in_bytes));
  634. dma_start_addr = (unsigned long)drv_data->tx;
  635. cr |= BIT_CTL_TIMOD_DMA_TX;
  636. } else
  637. BUG();
  638. /* oh man, here there be monsters ... and i dont mean the
  639. * fluffy cute ones from pixar, i mean the kind that'll eat
  640. * your data, kick your dog, and love it all. do *not* try
  641. * and change these lines unless you (1) heavily test DMA
  642. * with SPI flashes on a loaded system (e.g. ping floods),
  643. * (2) know just how broken the DMA engine interaction with
  644. * the SPI peripheral is, and (3) have someone else to blame
  645. * when you screw it all up anyways.
  646. */
  647. set_dma_start_addr(drv_data->dma_channel, dma_start_addr);
  648. set_dma_config(drv_data->dma_channel, dma_config);
  649. local_irq_save(flags);
  650. SSYNC();
  651. write_CTRL(drv_data, cr);
  652. enable_dma(drv_data->dma_channel);
  653. dma_enable_irq(drv_data->dma_channel);
  654. local_irq_restore(flags);
  655. return;
  656. }
  657. if (chip->pio_interrupt) {
  658. /* use write mode. spi irq should have been disabled */
  659. cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD));
  660. write_CTRL(drv_data, (cr | CFG_SPI_WRITE));
  661. /* discard old RX data and clear RXS */
  662. bfin_spi_dummy_read(drv_data);
  663. /* start transfer */
  664. if (drv_data->tx == NULL)
  665. write_TDBR(drv_data, chip->idle_tx_val);
  666. else {
  667. if (transfer->bits_per_word == 8)
  668. write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
  669. else if (transfer->bits_per_word == 16)
  670. write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
  671. drv_data->tx += drv_data->n_bytes;
  672. }
  673. /* once TDBR is empty, interrupt is triggered */
  674. enable_irq(drv_data->spi_irq);
  675. return;
  676. }
  677. /* IO mode */
  678. dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
  679. /* we always use SPI_WRITE mode. SPI_READ mode
  680. seems to have problems with setting up the
  681. output value in TDBR prior to the transfer. */
  682. write_CTRL(drv_data, (cr | CFG_SPI_WRITE));
  683. if (full_duplex) {
  684. /* full duplex mode */
  685. BUG_ON((drv_data->tx_end - drv_data->tx) !=
  686. (drv_data->rx_end - drv_data->rx));
  687. dev_dbg(&drv_data->pdev->dev,
  688. "IO duplex: cr is 0x%x\n", cr);
  689. drv_data->ops->duplex(drv_data);
  690. if (drv_data->tx != drv_data->tx_end)
  691. tranf_success = 0;
  692. } else if (drv_data->tx != NULL) {
  693. /* write only half duplex */
  694. dev_dbg(&drv_data->pdev->dev,
  695. "IO write: cr is 0x%x\n", cr);
  696. drv_data->ops->write(drv_data);
  697. if (drv_data->tx != drv_data->tx_end)
  698. tranf_success = 0;
  699. } else if (drv_data->rx != NULL) {
  700. /* read only half duplex */
  701. dev_dbg(&drv_data->pdev->dev,
  702. "IO read: cr is 0x%x\n", cr);
  703. drv_data->ops->read(drv_data);
  704. if (drv_data->rx != drv_data->rx_end)
  705. tranf_success = 0;
  706. }
  707. if (!tranf_success) {
  708. dev_dbg(&drv_data->pdev->dev,
  709. "IO write error!\n");
  710. message->state = ERROR_STATE;
  711. } else {
  712. /* Update total bytes transferred */
  713. message->actual_length += drv_data->len_in_bytes;
  714. /* Move to next transfer of this msg */
  715. message->state = bfin_spi_next_transfer(drv_data);
  716. if (drv_data->cs_change)
  717. bfin_spi_cs_deactive(drv_data, chip);
  718. }
  719. /* Schedule next transfer tasklet */
  720. tasklet_schedule(&drv_data->pump_transfers);
  721. }
  722. /* pop a msg from queue and kick off real transfer */
  723. static void bfin_spi_pump_messages(struct work_struct *work)
  724. {
  725. struct driver_data *drv_data;
  726. unsigned long flags;
  727. drv_data = container_of(work, struct driver_data, pump_messages);
  728. /* Lock queue and check for queue work */
  729. spin_lock_irqsave(&drv_data->lock, flags);
  730. if (list_empty(&drv_data->queue) || !drv_data->running) {
  731. /* pumper kicked off but no work to do */
  732. drv_data->busy = 0;
  733. spin_unlock_irqrestore(&drv_data->lock, flags);
  734. return;
  735. }
  736. /* Make sure we are not already running a message */
  737. if (drv_data->cur_msg) {
  738. spin_unlock_irqrestore(&drv_data->lock, flags);
  739. return;
  740. }
  741. /* Extract head of queue */
  742. drv_data->cur_msg = list_entry(drv_data->queue.next,
  743. struct spi_message, queue);
  744. /* Set up the SPI using the per-chip configuration */
  745. drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
  746. bfin_spi_restore_state(drv_data);
  747. list_del_init(&drv_data->cur_msg->queue);
  748. /* Initial message state */
  749. drv_data->cur_msg->state = START_STATE;
  750. drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
  751. struct spi_transfer, transfer_list);
  752. dev_dbg(&drv_data->pdev->dev, "got a message to pump, "
  753. "state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
  754. drv_data->cur_chip->baud, drv_data->cur_chip->flag,
  755. drv_data->cur_chip->ctl_reg);
  756. dev_dbg(&drv_data->pdev->dev,
  757. "the first transfer len is %d\n",
  758. drv_data->cur_transfer->len);
  759. /* Mark as busy and launch transfers */
  760. tasklet_schedule(&drv_data->pump_transfers);
  761. drv_data->busy = 1;
  762. spin_unlock_irqrestore(&drv_data->lock, flags);
  763. }
  764. /*
  765. * got a msg to transfer: queue it in drv_data->queue
  766. * and kick off the message pumper
  767. */
  768. static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg)
  769. {
  770. struct driver_data *drv_data = spi_master_get_devdata(spi->master);
  771. unsigned long flags;
  772. spin_lock_irqsave(&drv_data->lock, flags);
  773. if (!drv_data->running) {
  774. spin_unlock_irqrestore(&drv_data->lock, flags);
  775. return -ESHUTDOWN;
  776. }
  777. msg->actual_length = 0;
  778. msg->status = -EINPROGRESS;
  779. msg->state = START_STATE;
  780. dev_dbg(&spi->dev, "adding a msg in transfer()\n");
  781. list_add_tail(&msg->queue, &drv_data->queue);
  782. if (drv_data->running && !drv_data->busy)
  783. queue_work(drv_data->workqueue, &drv_data->pump_messages);
  784. spin_unlock_irqrestore(&drv_data->lock, flags);
  785. return 0;
  786. }
  787. #define MAX_SPI_SSEL 7
  788. static u16 ssel[][MAX_SPI_SSEL] = {
  789. {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
  790. P_SPI0_SSEL4, P_SPI0_SSEL5,
  791. P_SPI0_SSEL6, P_SPI0_SSEL7},
  792. {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3,
  793. P_SPI1_SSEL4, P_SPI1_SSEL5,
  794. P_SPI1_SSEL6, P_SPI1_SSEL7},
  795. {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3,
  796. P_SPI2_SSEL4, P_SPI2_SSEL5,
  797. P_SPI2_SSEL6, P_SPI2_SSEL7},
  798. };
  799. /* setup for devices (may be called multiple times -- not just first setup) */
  800. static int bfin_spi_setup(struct spi_device *spi)
  801. {
  802. struct bfin5xx_spi_chip *chip_info;
  803. struct chip_data *chip = NULL;
  804. struct driver_data *drv_data = spi_master_get_devdata(spi->master);
  805. int ret = -EINVAL;
  806. if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
  807. goto error;
  808. /* Only alloc (or use chip_info) on first setup */
  809. chip_info = NULL;
  810. chip = spi_get_ctldata(spi);
  811. if (chip == NULL) {
  812. chip = kzalloc(sizeof(*chip), GFP_KERNEL);
  813. if (!chip) {
  814. dev_err(&spi->dev, "cannot allocate chip data\n");
  815. ret = -ENOMEM;
  816. goto error;
  817. }
  818. chip->enable_dma = 0;
  819. chip_info = spi->controller_data;
  820. }
  821. /* chip_info isn't always needed */
  822. if (chip_info) {
  823. /* Make sure people stop trying to set fields via ctl_reg
  824. * when they should actually be using common SPI framework.
  825. * Currently we let through: WOM EMISO PSSE GM SZ TIMOD.
  826. * Not sure if a user actually needs/uses any of these,
  827. * but let's assume (for now) they do.
  828. */
  829. if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) {
  830. dev_err(&spi->dev, "do not set bits in ctl_reg "
  831. "that the SPI framework manages\n");
  832. goto error;
  833. }
  834. chip->enable_dma = chip_info->enable_dma != 0
  835. && drv_data->master_info->enable_dma;
  836. chip->ctl_reg = chip_info->ctl_reg;
  837. chip->bits_per_word = chip_info->bits_per_word;
  838. chip->cs_chg_udelay = chip_info->cs_chg_udelay;
  839. chip->cs_gpio = chip_info->cs_gpio;
  840. chip->idle_tx_val = chip_info->idle_tx_val;
  841. chip->pio_interrupt = chip_info->pio_interrupt;
  842. }
  843. /* translate common SPI framework mode bits into our ctl_reg */
  844. if (spi->mode & SPI_CPOL)
  845. chip->ctl_reg |= CPOL;
  846. if (spi->mode & SPI_CPHA)
  847. chip->ctl_reg |= CPHA;
  848. if (spi->mode & SPI_LSB_FIRST)
  849. chip->ctl_reg |= LSBF;
  850. /* we don't support running in slave mode (yet?) */
  851. chip->ctl_reg |= MSTR;
  852. /*
  853. * Note: on Blackfin, chip->baud holds the SPI_BAUD register value,
  854. * not the actual baud rate
  855. */
  856. chip->baud = hz_to_spi_baud(spi->max_speed_hz);
  857. chip->flag = (1 << (spi->chip_select)) << 8;
  858. chip->chip_select_num = spi->chip_select;
  859. switch (chip->bits_per_word) {
  860. case 8:
  861. chip->n_bytes = 1;
  862. chip->width = CFG_SPI_WORDSIZE8;
  863. chip->ops = &bfin_transfer_ops_u8;
  864. break;
  865. case 16:
  866. chip->n_bytes = 2;
  867. chip->width = CFG_SPI_WORDSIZE16;
  868. chip->ops = &bfin_transfer_ops_u16;
  869. break;
  870. default:
  871. dev_err(&spi->dev, "%d bits_per_word is not supported\n",
  872. chip->bits_per_word);
  873. goto error;
  874. }
  875. if (chip->enable_dma && chip->pio_interrupt) {
  876. dev_err(&spi->dev, "enable_dma is set, "
  877. "do not set pio_interrupt\n");
  878. goto error;
  879. }
  880. /*
  881. * if any one SPI chip is registered and wants DMA, request the
  882. * DMA channel for it
  883. */
  884. if (chip->enable_dma && !drv_data->dma_requested) {
  885. /* register dma irq handler */
  886. ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA");
  887. if (ret) {
  888. dev_err(&spi->dev,
  889. "Unable to request BlackFin SPI DMA channel\n");
  890. goto error;
  891. }
  892. drv_data->dma_requested = 1;
  893. ret = set_dma_callback(drv_data->dma_channel,
  894. bfin_spi_dma_irq_handler, drv_data);
  895. if (ret) {
  896. dev_err(&spi->dev, "Unable to set dma callback\n");
  897. goto error;
  898. }
  899. dma_disable_irq(drv_data->dma_channel);
  900. }
  901. if (chip->pio_interrupt && !drv_data->irq_requested) {
  902. ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler,
  903. IRQF_DISABLED, "BFIN_SPI", drv_data);
  904. if (ret) {
  905. dev_err(&spi->dev, "Unable to register spi IRQ\n");
  906. goto error;
  907. }
  908. drv_data->irq_requested = 1;
  909. /* we use write mode, spi irq has to be disabled here */
  910. disable_irq(drv_data->spi_irq);
  911. }
  912. if (chip->chip_select_num == 0) {
  913. ret = gpio_request(chip->cs_gpio, spi->modalias);
  914. if (ret) {
  915. dev_err(&spi->dev, "gpio_request() error\n");
  916. goto pin_error;
  917. }
  918. gpio_direction_output(chip->cs_gpio, 1);
  919. }
  920. dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n",
  921. spi->modalias, chip->width, chip->enable_dma);
  922. dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n",
  923. chip->ctl_reg, chip->flag);
  924. spi_set_ctldata(spi, chip);
  925. dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num);
  926. if (chip->chip_select_num > 0 &&
  927. chip->chip_select_num <= spi->master->num_chipselect) {
  928. ret = peripheral_request(ssel[spi->master->bus_num]
  929. [chip->chip_select_num-1], spi->modalias);
  930. if (ret) {
  931. dev_err(&spi->dev, "peripheral_request() error\n");
  932. goto pin_error;
  933. }
  934. }
  935. bfin_spi_cs_enable(drv_data, chip);
  936. bfin_spi_cs_deactive(drv_data, chip);
  937. return 0;
  938. pin_error:
  939. if (chip->chip_select_num == 0)
  940. gpio_free(chip->cs_gpio);
  941. else
  942. peripheral_free(ssel[spi->master->bus_num]
  943. [chip->chip_select_num - 1]);
  944. error:
  945. if (chip) {
  946. if (drv_data->dma_requested)
  947. free_dma(drv_data->dma_channel);
  948. drv_data->dma_requested = 0;
  949. kfree(chip);
  950. /* prevent freeing 'chip' twice */
  951. spi_set_ctldata(spi, NULL);
  952. }
  953. return ret;
  954. }
  955. /*
  956. * callback for spi framework.
  957. * clean up driver-specific data
  958. */
  959. static void bfin_spi_cleanup(struct spi_device *spi)
  960. {
  961. struct chip_data *chip = spi_get_ctldata(spi);
  962. struct driver_data *drv_data = spi_master_get_devdata(spi->master);
  963. if (!chip)
  964. return;
  965. if ((chip->chip_select_num > 0)
  966. && (chip->chip_select_num <= spi->master->num_chipselect)) {
  967. peripheral_free(ssel[spi->master->bus_num]
  968. [chip->chip_select_num-1]);
  969. bfin_spi_cs_disable(drv_data, chip);
  970. }
  971. if (chip->chip_select_num == 0)
  972. gpio_free(chip->cs_gpio);
  973. kfree(chip);
  974. /* prevent freeing 'chip' twice */
  975. spi_set_ctldata(spi, NULL);
  976. }
  977. static inline int bfin_spi_init_queue(struct driver_data *drv_data)
  978. {
  979. INIT_LIST_HEAD(&drv_data->queue);
  980. spin_lock_init(&drv_data->lock);
  981. drv_data->running = false;
  982. drv_data->busy = 0;
  983. /* init transfer tasklet */
  984. tasklet_init(&drv_data->pump_transfers,
  985. bfin_spi_pump_transfers, (unsigned long)drv_data);
  986. /* init messages workqueue */
  987. INIT_WORK(&drv_data->pump_messages, bfin_spi_pump_messages);
  988. drv_data->workqueue = create_singlethread_workqueue(
  989. dev_name(drv_data->master->dev.parent));
  990. if (drv_data->workqueue == NULL)
  991. return -EBUSY;
  992. return 0;
  993. }
  994. static inline int bfin_spi_start_queue(struct driver_data *drv_data)
  995. {
  996. unsigned long flags;
  997. spin_lock_irqsave(&drv_data->lock, flags);
  998. if (drv_data->running || drv_data->busy) {
  999. spin_unlock_irqrestore(&drv_data->lock, flags);
  1000. return -EBUSY;
  1001. }
  1002. drv_data->running = true;
  1003. drv_data->cur_msg = NULL;
  1004. drv_data->cur_transfer = NULL;
  1005. drv_data->cur_chip = NULL;
  1006. spin_unlock_irqrestore(&drv_data->lock, flags);
  1007. queue_work(drv_data->workqueue, &drv_data->pump_messages);
  1008. return 0;
  1009. }
  1010. static inline int bfin_spi_stop_queue(struct driver_data *drv_data)
  1011. {
  1012. unsigned long flags;
  1013. unsigned limit = 500;
  1014. int status = 0;
  1015. spin_lock_irqsave(&drv_data->lock, flags);
  1016. /*
  1017. * This is a bit lame, but is optimized for the common execution path.
  1018. * A wait_queue on the drv_data->busy could be used, but then the common
  1019. * execution path (pump_messages) would be required to call wake_up or
  1020. * friends on every SPI message. Do this instead
  1021. */
  1022. drv_data->running = false;
  1023. while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
  1024. spin_unlock_irqrestore(&drv_data->lock, flags);
  1025. msleep(10);
  1026. spin_lock_irqsave(&drv_data->lock, flags);
  1027. }
  1028. if (!list_empty(&drv_data->queue) || drv_data->busy)
  1029. status = -EBUSY;
  1030. spin_unlock_irqrestore(&drv_data->lock, flags);
  1031. return status;
  1032. }
  1033. static inline int bfin_spi_destroy_queue(struct driver_data *drv_data)
  1034. {
  1035. int status;
  1036. status = bfin_spi_stop_queue(drv_data);
  1037. if (status != 0)
  1038. return status;
  1039. destroy_workqueue(drv_data->workqueue);
  1040. return 0;
  1041. }
  1042. static int __init bfin_spi_probe(struct platform_device *pdev)
  1043. {
  1044. struct device *dev = &pdev->dev;
  1045. struct bfin5xx_spi_master *platform_info;
  1046. struct spi_master *master;
  1047. struct driver_data *drv_data = NULL;
  1048. struct resource *res;
  1049. int status = 0;
  1050. platform_info = dev->platform_data;
  1051. /* Allocate master with space for drv_data */
  1052. master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
  1053. if (!master) {
  1054. dev_err(&pdev->dev, "can not alloc spi_master\n");
  1055. return -ENOMEM;
  1056. }
  1057. drv_data = spi_master_get_devdata(master);
  1058. drv_data->master = master;
  1059. drv_data->master_info = platform_info;
  1060. drv_data->pdev = pdev;
  1061. drv_data->pin_req = platform_info->pin_req;
  1062. /* the spi->mode bits supported by this driver: */
  1063. master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
  1064. master->bus_num = pdev->id;
  1065. master->num_chipselect = platform_info->num_chipselect;
  1066. master->cleanup = bfin_spi_cleanup;
  1067. master->setup = bfin_spi_setup;
  1068. master->transfer = bfin_spi_transfer;
  1069. /* Find and map our resources */
  1070. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1071. if (res == NULL) {
  1072. dev_err(dev, "Cannot get IORESOURCE_MEM\n");
  1073. status = -ENOENT;
  1074. goto out_error_get_res;
  1075. }
  1076. drv_data->regs_base = ioremap(res->start, resource_size(res));
  1077. if (drv_data->regs_base == NULL) {
  1078. dev_err(dev, "Cannot map IO\n");
  1079. status = -ENXIO;
  1080. goto out_error_ioremap;
  1081. }
  1082. res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
  1083. if (res == NULL) {
  1084. dev_err(dev, "No DMA channel specified\n");
  1085. status = -ENOENT;
  1086. goto out_error_free_io;
  1087. }
  1088. drv_data->dma_channel = res->start;
  1089. drv_data->spi_irq = platform_get_irq(pdev, 0);
  1090. if (drv_data->spi_irq < 0) {
  1091. dev_err(dev, "No spi pio irq specified\n");
  1092. status = -ENOENT;
  1093. goto out_error_free_io;
  1094. }
  1095. /* Initialize and start the queue */
  1096. status = bfin_spi_init_queue(drv_data);
  1097. if (status != 0) {
  1098. dev_err(dev, "problem initializing queue\n");
  1099. goto out_error_queue_alloc;
  1100. }
  1101. status = bfin_spi_start_queue(drv_data);
  1102. if (status != 0) {
  1103. dev_err(dev, "problem starting queue\n");
  1104. goto out_error_queue_alloc;
  1105. }
  1106. status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
  1107. if (status != 0) {
  1108. dev_err(&pdev->dev, "requesting peripherals failed\n");
  1109. goto out_error_queue_alloc;
  1110. }
  1111. /* Reset SPI registers. If these registers were used by the boot loader,
  1112. * the sky may fall on your head if you enable the dma controller.
  1113. */
  1114. write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER);
  1115. write_FLAG(drv_data, 0xFF00);
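/*
 * The 0xFF00 written above deasserts all FLG chip-select outputs (high byte)
 * and clears all FLS slave-select enables (low byte); individual selects are
 * re-enabled per device in bfin_spi_setup().
 */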
  1116. /* Register with the SPI framework */
  1117. platform_set_drvdata(pdev, drv_data);
  1118. status = spi_register_master(master);
  1119. if (status != 0) {
  1120. dev_err(dev, "problem registering spi master\n");
  1121. goto out_error_queue_alloc;
  1122. }
  1123. dev_info(dev, "%s, Version %s, regs_base@%p, dma channel@%d\n",
  1124. DRV_DESC, DRV_VERSION, drv_data->regs_base,
  1125. drv_data->dma_channel);
  1126. return status;
  1127. out_error_queue_alloc:
  1128. bfin_spi_destroy_queue(drv_data);
  1129. out_error_free_io:
  1130. iounmap((void *) drv_data->regs_base);
  1131. out_error_ioremap:
  1132. out_error_get_res:
  1133. spi_master_put(master);
  1134. return status;
  1135. }
  1136. /* stop hardware and remove the driver */
  1137. static int __devexit bfin_spi_remove(struct platform_device *pdev)
  1138. {
  1139. struct driver_data *drv_data = platform_get_drvdata(pdev);
  1140. int status = 0;
  1141. if (!drv_data)
  1142. return 0;
  1143. /* Remove the queue */
  1144. status = bfin_spi_destroy_queue(drv_data);
  1145. if (status != 0)
  1146. return status;
  1148. /* Disable the SPI controller at the peripheral and SOC level */
  1148. bfin_spi_disable(drv_data);
  1149. /* Release DMA */
  1150. if (drv_data->master_info->enable_dma) {
  1151. if (dma_channel_active(drv_data->dma_channel))
  1152. free_dma(drv_data->dma_channel);
  1153. }
  1154. if (drv_data->irq_requested) {
  1155. free_irq(drv_data->spi_irq, drv_data);
  1156. drv_data->irq_requested = 0;
  1157. }
  1158. /* Disconnect from the SPI framework */
  1159. spi_unregister_master(drv_data->master);
  1160. peripheral_free_list(drv_data->pin_req);
  1161. /* Prevent double remove */
  1162. platform_set_drvdata(pdev, NULL);
  1163. return 0;
  1164. }
  1165. #ifdef CONFIG_PM
  1166. static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
  1167. {
  1168. struct driver_data *drv_data = platform_get_drvdata(pdev);
  1169. int status = 0;
  1170. status = bfin_spi_stop_queue(drv_data);
  1171. if (status != 0)
  1172. return status;
  1173. /* stop hardware */
  1174. bfin_spi_disable(drv_data);
  1175. return 0;
  1176. }
  1177. static int bfin_spi_resume(struct platform_device *pdev)
  1178. {
  1179. struct driver_data *drv_data = platform_get_drvdata(pdev);
  1180. int status = 0;
  1181. /* Enable the SPI interface */
  1182. bfin_spi_enable(drv_data);
  1183. /* Start the queue running */
  1184. status = bfin_spi_start_queue(drv_data);
  1185. if (status != 0) {
  1186. dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
  1187. return status;
  1188. }
  1189. return 0;
  1190. }
  1191. #else
  1192. #define bfin_spi_suspend NULL
  1193. #define bfin_spi_resume NULL
  1194. #endif /* CONFIG_PM */
  1195. MODULE_ALIAS("platform:bfin-spi");
  1196. static struct platform_driver bfin_spi_driver = {
  1197. .driver = {
  1198. .name = DRV_NAME,
  1199. .owner = THIS_MODULE,
  1200. },
  1201. .suspend = bfin_spi_suspend,
  1202. .resume = bfin_spi_resume,
  1203. .remove = __devexit_p(bfin_spi_remove),
  1204. };
  1205. static int __init bfin_spi_init(void)
  1206. {
  1207. return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe);
  1208. }
  1209. module_init(bfin_spi_init);
  1210. static void __exit bfin_spi_exit(void)
  1211. {
  1212. platform_driver_unregister(&bfin_spi_driver);
  1213. }
  1214. module_exit(bfin_spi_exit);
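/*
 * Example wiring (illustrative only, not part of this driver): a board file
 * would register the controller's platform data and a slave device roughly
 * as sketched below. The resource numbers, pin list, speed, and the flash
 * modalias are assumptions that depend on the specific Blackfin part and
 * board; consult the machine's portmux and board headers for real values.
 *
 *	static struct bfin5xx_spi_master bfin_spi0_info = {
 *		.num_chipselect = 8,
 *		.enable_dma = 1,
 *		.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
 *	};
 *
 *	static struct bfin5xx_spi_chip spi_flash_chip_info = {
 *		.enable_dma = 0,		// use PIO for this device
 *		.bits_per_word = 8,
 *	};
 *
 *	static struct spi_board_info bfin_spi_board_info[] __initdata = {
 *		{
 *			.modalias = "m25p80",	// hypothetical SPI flash
 *			.max_speed_hz = 25000000,
 *			.bus_num = 0,
 *			.chip_select = 1,
 *			.controller_data = &spi_flash_chip_info,
 *			.mode = SPI_MODE_3,
 *		},
 *	};
 */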