  1. /*
  2. * File: drivers/ata/pata_bf54x.c
  3. * Author: Sonic Zhang <sonic.zhang@analog.com>
  4. *
  5. * Created:
  6. * Description: PATA Driver for blackfin 54x
  7. *
  8. * Modified:
  9. * Copyright 2007 Analog Devices Inc.
  10. *
  11. * Bugs: Enter bugs at http://blackfin.uclinux.org/
  12. *
  13. * This program is free software; you can redistribute it and/or modify
  14. * it under the terms of the GNU General Public License as published by
  15. * the Free Software Foundation; either version 2 of the License, or
  16. * (at your option) any later version.
  17. *
  18. * This program is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU General Public License
  24. * along with this program; if not, see the file COPYING, or write
  25. * to the Free Software Foundation, Inc.,
  26. * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  27. */
  28. #include <linux/kernel.h>
  29. #include <linux/module.h>
  30. #include <linux/pci.h>
  31. #include <linux/init.h>
  32. #include <linux/blkdev.h>
  33. #include <linux/delay.h>
  34. #include <linux/device.h>
  35. #include <scsi/scsi_host.h>
  36. #include <linux/libata.h>
  37. #include <linux/platform_device.h>
  38. #include <asm/dma.h>
  39. #include <asm/gpio.h>
  40. #include <asm/portmux.h>
  41. #define DRV_NAME "pata-bf54x"
  42. #define DRV_VERSION "0.9"
  43. #define ATA_REG_CTRL 0x0E
  44. #define ATA_REG_ALTSTATUS ATA_REG_CTRL
  45. /* These are the offset of the controller's registers */
  46. #define ATAPI_OFFSET_CONTROL 0x00
  47. #define ATAPI_OFFSET_STATUS 0x04
  48. #define ATAPI_OFFSET_DEV_ADDR 0x08
  49. #define ATAPI_OFFSET_DEV_TXBUF 0x0c
  50. #define ATAPI_OFFSET_DEV_RXBUF 0x10
  51. #define ATAPI_OFFSET_INT_MASK 0x14
  52. #define ATAPI_OFFSET_INT_STATUS 0x18
  53. #define ATAPI_OFFSET_XFER_LEN 0x1c
  54. #define ATAPI_OFFSET_LINE_STATUS 0x20
  55. #define ATAPI_OFFSET_SM_STATE 0x24
  56. #define ATAPI_OFFSET_TERMINATE 0x28
  57. #define ATAPI_OFFSET_PIO_TFRCNT 0x2c
  58. #define ATAPI_OFFSET_DMA_TFRCNT 0x30
  59. #define ATAPI_OFFSET_UMAIN_TFRCNT 0x34
  60. #define ATAPI_OFFSET_UDMAOUT_TFRCNT 0x38
  61. #define ATAPI_OFFSET_REG_TIM_0 0x40
  62. #define ATAPI_OFFSET_PIO_TIM_0 0x44
  63. #define ATAPI_OFFSET_PIO_TIM_1 0x48
  64. #define ATAPI_OFFSET_MULTI_TIM_0 0x50
  65. #define ATAPI_OFFSET_MULTI_TIM_1 0x54
  66. #define ATAPI_OFFSET_MULTI_TIM_2 0x58
  67. #define ATAPI_OFFSET_ULTRA_TIM_0 0x60
  68. #define ATAPI_OFFSET_ULTRA_TIM_1 0x64
  69. #define ATAPI_OFFSET_ULTRA_TIM_2 0x68
  70. #define ATAPI_OFFSET_ULTRA_TIM_3 0x6c
  71. #define ATAPI_GET_CONTROL(base)\
  72. bfin_read16(base + ATAPI_OFFSET_CONTROL)
  73. #define ATAPI_SET_CONTROL(base, val)\
  74. bfin_write16(base + ATAPI_OFFSET_CONTROL, val)
  75. #define ATAPI_GET_STATUS(base)\
  76. bfin_read16(base + ATAPI_OFFSET_STATUS)
  77. #define ATAPI_GET_DEV_ADDR(base)\
  78. bfin_read16(base + ATAPI_OFFSET_DEV_ADDR)
  79. #define ATAPI_SET_DEV_ADDR(base, val)\
  80. bfin_write16(base + ATAPI_OFFSET_DEV_ADDR, val)
  81. #define ATAPI_GET_DEV_TXBUF(base)\
  82. bfin_read16(base + ATAPI_OFFSET_DEV_TXBUF)
  83. #define ATAPI_SET_DEV_TXBUF(base, val)\
  84. bfin_write16(base + ATAPI_OFFSET_DEV_TXBUF, val)
  85. #define ATAPI_GET_DEV_RXBUF(base)\
  86. bfin_read16(base + ATAPI_OFFSET_DEV_RXBUF)
  87. #define ATAPI_SET_DEV_RXBUF(base, val)\
  88. bfin_write16(base + ATAPI_OFFSET_DEV_RXBUF, val)
  89. #define ATAPI_GET_INT_MASK(base)\
  90. bfin_read16(base + ATAPI_OFFSET_INT_MASK)
  91. #define ATAPI_SET_INT_MASK(base, val)\
  92. bfin_write16(base + ATAPI_OFFSET_INT_MASK, val)
  93. #define ATAPI_GET_INT_STATUS(base)\
  94. bfin_read16(base + ATAPI_OFFSET_INT_STATUS)
  95. #define ATAPI_SET_INT_STATUS(base, val)\
  96. bfin_write16(base + ATAPI_OFFSET_INT_STATUS, val)
  97. #define ATAPI_GET_XFER_LEN(base)\
  98. bfin_read16(base + ATAPI_OFFSET_XFER_LEN)
  99. #define ATAPI_SET_XFER_LEN(base, val)\
  100. bfin_write16(base + ATAPI_OFFSET_XFER_LEN, val)
  101. #define ATAPI_GET_LINE_STATUS(base)\
  102. bfin_read16(base + ATAPI_OFFSET_LINE_STATUS)
  103. #define ATAPI_GET_SM_STATE(base)\
  104. bfin_read16(base + ATAPI_OFFSET_SM_STATE)
  105. #define ATAPI_GET_TERMINATE(base)\
  106. bfin_read16(base + ATAPI_OFFSET_TERMINATE)
  107. #define ATAPI_SET_TERMINATE(base, val)\
  108. bfin_write16(base + ATAPI_OFFSET_TERMINATE, val)
  109. #define ATAPI_GET_PIO_TFRCNT(base)\
  110. bfin_read16(base + ATAPI_OFFSET_PIO_TFRCNT)
  111. #define ATAPI_GET_DMA_TFRCNT(base)\
  112. bfin_read16(base + ATAPI_OFFSET_DMA_TFRCNT)
  113. #define ATAPI_GET_UMAIN_TFRCNT(base)\
  114. bfin_read16(base + ATAPI_OFFSET_UMAIN_TFRCNT)
  115. #define ATAPI_GET_UDMAOUT_TFRCNT(base)\
  116. bfin_read16(base + ATAPI_OFFSET_UDMAOUT_TFRCNT)
  117. #define ATAPI_GET_REG_TIM_0(base)\
  118. bfin_read16(base + ATAPI_OFFSET_REG_TIM_0)
  119. #define ATAPI_SET_REG_TIM_0(base, val)\
  120. bfin_write16(base + ATAPI_OFFSET_REG_TIM_0, val)
  121. #define ATAPI_GET_PIO_TIM_0(base)\
  122. bfin_read16(base + ATAPI_OFFSET_PIO_TIM_0)
  123. #define ATAPI_SET_PIO_TIM_0(base, val)\
  124. bfin_write16(base + ATAPI_OFFSET_PIO_TIM_0, val)
  125. #define ATAPI_GET_PIO_TIM_1(base)\
  126. bfin_read16(base + ATAPI_OFFSET_PIO_TIM_1)
  127. #define ATAPI_SET_PIO_TIM_1(base, val)\
  128. bfin_write16(base + ATAPI_OFFSET_PIO_TIM_1, val)
  129. #define ATAPI_GET_MULTI_TIM_0(base)\
  130. bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_0)
  131. #define ATAPI_SET_MULTI_TIM_0(base, val)\
  132. bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_0, val)
  133. #define ATAPI_GET_MULTI_TIM_1(base)\
  134. bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_1)
  135. #define ATAPI_SET_MULTI_TIM_1(base, val)\
  136. bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_1, val)
  137. #define ATAPI_GET_MULTI_TIM_2(base)\
  138. bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_2)
  139. #define ATAPI_SET_MULTI_TIM_2(base, val)\
  140. bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_2, val)
  141. #define ATAPI_GET_ULTRA_TIM_0(base)\
  142. bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_0)
  143. #define ATAPI_SET_ULTRA_TIM_0(base, val)\
  144. bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_0, val)
  145. #define ATAPI_GET_ULTRA_TIM_1(base)\
  146. bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_1)
  147. #define ATAPI_SET_ULTRA_TIM_1(base, val)\
  148. bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_1, val)
  149. #define ATAPI_GET_ULTRA_TIM_2(base)\
  150. bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_2)
  151. #define ATAPI_SET_ULTRA_TIM_2(base, val)\
  152. bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_2, val)
  153. #define ATAPI_GET_ULTRA_TIM_3(base)\
  154. bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_3)
  155. #define ATAPI_SET_ULTRA_TIM_3(base, val)\
  156. bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_3, val)
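/*
 * Illustrative sketch, not part of the original driver: the accessor macros
 * above simply wrap bfin_read16()/bfin_write16() around a base address plus
 * a fixed register offset. Assuming a valid MMIO base for the ATAPI block,
 * a hypothetical debug helper built on them could look like this.
 */
#if 0	/* example only, not compiled */
static void atapi_dump_regs_example(void __iomem *base)
{
	pr_debug("ATAPI CONTROL    0x%04x\n", ATAPI_GET_CONTROL(base));
	pr_debug("ATAPI STATUS     0x%04x\n", ATAPI_GET_STATUS(base));
	pr_debug("ATAPI INT_MASK   0x%04x\n", ATAPI_GET_INT_MASK(base));
	pr_debug("ATAPI INT_STATUS 0x%04x\n", ATAPI_GET_INT_STATUS(base));
}
#endif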
  157. /**
  158. * PIO Mode - Frequency compatibility
  159. */
  160. /* mode: 0 1 2 3 4 */
  161. static const u32 pio_fsclk[] =
  162. { 33333333, 33333333, 33333333, 33333333, 33333333 };
  163. /**
  164. * MDMA Mode - Frequency compatibility
  165. */
  166. /* mode: 0 1 2 */
  167. static const u32 mdma_fsclk[] = { 33333333, 33333333, 33333333 };
  168. /**
  169. * UDMA Mode - Frequency compatibility
  170. *
  171. * UDMA5 - 100 MB/s - SCLK = 133 MHz
  172. * UDMA4 - 66 MB/s - SCLK >= 80 MHz
  173. * UDMA3 - 44.4 MB/s - SCLK >= 50 MHz
  174. * UDMA2 - 33 MB/s - SCLK >= 40 MHz
  175. */
  176. /* mode: 0 1 2 3 4 5 */
  177. static const u32 udma_fsclk[] =
  178. { 33333333, 33333333, 40000000, 50000000, 80000000, 133333333 };
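/*
 * Illustrative sketch, not part of the original driver: the table above
 * gives the minimum SCLK needed for each UDMA mode. A mode is usable only
 * while udma_fsclk[mode] <= SCLK, so a hypothetical clamp looks like this
 * (bfin_atapi_probe() applies the same idea to the port's udma_mask).
 */
#if 0	/* example only, not compiled */
static int highest_udma_mode_example(u32 fsclk)
{
	int mode = 5;	/* start from UDMA5 and back off */

	while (mode > 0 && udma_fsclk[mode] > fsclk)
		mode--;
	return mode;	/* e.g. fsclk = 100000000 -> UDMA4 */
}
#endif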
  179. /**
  180. * Register transfer timing table
  181. */
  182. /* mode: 0 1 2 3 4 */
  183. /* Cycle Time */
  184. static const u32 reg_t0min[] = { 600, 383, 330, 180, 120 };
  185. /* DIOR/DIOW to end cycle */
  186. static const u32 reg_t2min[] = { 290, 290, 290, 70, 25 };
  187. /* DIOR/DIOW asserted pulse width */
  188. static const u32 reg_teocmin[] = { 290, 290, 290, 80, 70 };
  189. /**
  190. * PIO timing table
  191. */
  192. /* mode: 0 1 2 3 4 */
  193. /* Cycle Time */
  194. static const u32 pio_t0min[] = { 600, 383, 240, 180, 120 };
  195. /* Address valid to DIOR/DIORW */
  196. static const u32 pio_t1min[] = { 70, 50, 30, 30, 25 };
  197. /* DIOR/DIOW to end cycle */
  198. static const u32 pio_t2min[] = { 165, 125, 100, 80, 70 };
  199. /* DIOR/DIOW asserted pulse width */
  200. static const u32 pio_teocmin[] = { 165, 125, 100, 70, 25 };
  201. /* DIOW data hold */
  202. static const u32 pio_t4min[] = { 30, 20, 15, 10, 10 };
  203. /* ******************************************************************
  204. * Multiword DMA timing table
  205. * ******************************************************************
  206. */
  207. /* mode: 0 1 2 */
  208. /* Cycle Time */
  209. static const u32 mdma_t0min[] = { 480, 150, 120 };
  210. /* DIOR/DIOW asserted pulse width */
  211. static const u32 mdma_tdmin[] = { 215, 80, 70 };
  212. /* DMACK to read data released */
  213. static const u32 mdma_thmin[] = { 20, 15, 10 };
  214. /* DIOR/DIOW to DMACK hold */
  215. static const u32 mdma_tjmin[] = { 20, 5, 5 };
  216. /* DIOR negated pulse width */
  217. static const u32 mdma_tkrmin[] = { 50, 50, 25 };
  218. /* DIOR negated pulse width */
  219. static const u32 mdma_tkwmin[] = { 215, 50, 25 };
  220. /* CS[1:0] valid to DIOR/DIOW */
  221. static const u32 mdma_tmmin[] = { 50, 30, 25 };
  222. /* DMACK to read data released */
  223. static const u32 mdma_tzmax[] = { 20, 25, 25 };
  224. /**
  225. * Ultra DMA timing table
  226. */
  227. /* mode: 0 1 2 3 4 5 */
  228. static const u32 udma_tcycmin[] = { 112, 73, 54, 39, 25, 17 };
  229. static const u32 udma_tdvsmin[] = { 70, 48, 31, 20, 7, 5 };
  230. static const u32 udma_tenvmax[] = { 70, 70, 70, 55, 55, 50 };
  231. static const u32 udma_trpmin[] = { 160, 125, 100, 100, 100, 85 };
  232. static const u32 udma_tmin[] = { 5, 5, 5, 5, 3, 3 };
  233. static const u32 udma_tmlimin = 20;
  234. static const u32 udma_tzahmin = 20;
  235. static const u32 udma_tenvmin = 20;
  236. static const u32 udma_tackmin = 20;
  237. static const u32 udma_tssmin = 50;
  238. /**
  239. *
  240. * Function: num_clocks_min
  241. *
  242. * Description:
  243. * calculate number of SCLK cycles to meet minimum timing
  244. */
  245. static unsigned short num_clocks_min(unsigned long tmin,
  246. unsigned long fsclk)
  247. {
unsigned long tmp;
  249. unsigned short result;
  250. tmp = tmin * (fsclk/1000/1000) / 1000;
  251. result = (unsigned short)tmp;
  252. if ((tmp*1000*1000) < (tmin*(fsclk/1000))) {
  253. result++;
  254. }
  255. return result;
  256. }
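/*
 * Illustrative worked example, not part of the original driver:
 * num_clocks_min() rounds a minimum time in ns up to whole SCLK cycles.
 * With tmin = 70 ns and fsclk = 133333333 Hz, tmp = 70 * 133 / 1000 = 9,
 * the cross-check 9000000 < 70 * 133333 holds, so the result becomes
 * 10 cycles (10 x 7.5 ns = 75 ns is the first duration >= 70 ns).
 */
#if 0	/* example only, not compiled */
static void num_clocks_min_example(void)
{
	/* hypothetical sanity checks on the rounding behaviour */
	if (num_clocks_min(70, 133333333) != 10 ||
	    num_clocks_min(120, 133333333) != 16)
		pr_debug("unexpected rounding in num_clocks_min()\n");
}
#endif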
  257. /**
  258. * bfin_set_piomode - Initialize host controller PATA PIO timings
  259. * @ap: Port whose timings we are configuring
* @adev: device we are configuring
  261. *
  262. * Set PIO mode for device.
  263. *
  264. * LOCKING:
  265. * None (inherited from caller).
  266. */
  267. static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev)
  268. {
  269. int mode = adev->pio_mode - XFER_PIO_0;
  270. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  271. unsigned int fsclk = get_sclk();
  272. unsigned short teoc_reg, t2_reg, teoc_pio;
  273. unsigned short t4_reg, t2_pio, t1_reg;
  274. unsigned short n0, n6, t6min = 5;
/* the most restrictive timing values are t6 and tc, the DIOW data hold.
* If one SCLK pulse is longer than this minimum value then register
* transfers cannot be supported at this frequency.
*/
  279. n6 = num_clocks_min(t6min, fsclk);
  280. if (mode >= 0 && mode <= 4 && n6 >= 1) {
dev_dbg(adev->link->ap->dev, "set piomode: mode=%d, fsclk=%u\n", mode, fsclk);
  282. /* calculate the timing values for register transfers. */
  283. while (mode > 0 && pio_fsclk[mode] > fsclk)
  284. mode--;
  285. /* DIOR/DIOW to end cycle time */
  286. t2_reg = num_clocks_min(reg_t2min[mode], fsclk);
  287. /* DIOR/DIOW asserted pulse width */
  288. teoc_reg = num_clocks_min(reg_teocmin[mode], fsclk);
  289. /* Cycle Time */
  290. n0 = num_clocks_min(reg_t0min[mode], fsclk);
/* increase t2 until we meet the minimum cycle length */
  292. if (t2_reg + teoc_reg < n0)
  293. t2_reg = n0 - teoc_reg;
  294. /* calculate the timing values for pio transfers. */
  295. /* DIOR/DIOW to end cycle time */
  296. t2_pio = num_clocks_min(pio_t2min[mode], fsclk);
  297. /* DIOR/DIOW asserted pulse width */
  298. teoc_pio = num_clocks_min(pio_teocmin[mode], fsclk);
  299. /* Cycle Time */
  300. n0 = num_clocks_min(pio_t0min[mode], fsclk);
/* increase t2 until we meet the minimum cycle length */
  302. if (t2_pio + teoc_pio < n0)
  303. t2_pio = n0 - teoc_pio;
  304. /* Address valid to DIOR/DIORW */
  305. t1_reg = num_clocks_min(pio_t1min[mode], fsclk);
  306. /* DIOW data hold */
  307. t4_reg = num_clocks_min(pio_t4min[mode], fsclk);
  308. ATAPI_SET_REG_TIM_0(base, (teoc_reg<<8 | t2_reg));
  309. ATAPI_SET_PIO_TIM_0(base, (t4_reg<<12 | t2_pio<<4 | t1_reg));
  310. ATAPI_SET_PIO_TIM_1(base, teoc_pio);
  311. if (mode > 2) {
  312. ATAPI_SET_CONTROL(base,
  313. ATAPI_GET_CONTROL(base) | IORDY_EN);
  314. } else {
  315. ATAPI_SET_CONTROL(base,
  316. ATAPI_GET_CONTROL(base) & ~IORDY_EN);
  317. }
  318. /* Disable host ATAPI PIO interrupts */
  319. ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
  320. & ~(PIO_DONE_MASK | HOST_TERM_XFER_MASK));
  321. SSYNC();
  322. }
  323. }
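/*
 * Illustrative worked example, not part of the original driver: for PIO
 * mode 4 with SCLK = 133333333 Hz, num_clocks_min() gives t2_reg = 4,
 * teoc_reg = 10 and n0 = 16 cycles; since 4 + 10 < 16, t2_reg is stretched
 * to 6, so ATAPI_REG_TIM_0 is programmed with (10 << 8) | 6 = 0x0a06.
 * The PIO timing registers are packed the same way from t4/t2/t1/teoc.
 */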
  324. /**
  325. * bfin_set_dmamode - Initialize host controller PATA DMA timings
  326. * @ap: Port whose timings we are configuring
* @adev: device we are configuring
*
* Set UDMA or MWDMA mode for device.
  331. *
  332. * LOCKING:
  333. * None (inherited from caller).
  334. */
  335. static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
  336. {
  337. int mode;
  338. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  339. unsigned long fsclk = get_sclk();
  340. unsigned short tenv, tack, tcyc_tdvs, tdvs, tmli, tss, trp, tzah;
  341. unsigned short tm, td, tkr, tkw, teoc, th;
  342. unsigned short n0, nf, tfmin = 5;
  343. unsigned short nmin, tcyc;
  344. mode = adev->dma_mode - XFER_UDMA_0;
  345. if (mode >= 0 && mode <= 5) {
  346. dev_dbg(adev->link->ap->dev, "set udmamode: mode=%d\n", mode);
  347. /* the most restrictive timing value is t6 and tc,
  348. * the DIOW - data hold. If one SCLK pulse is longer
  349. * than this minimum value then register
  350. * transfers cannot be supported at this frequency.
  351. */
  352. while (mode > 0 && udma_fsclk[mode] > fsclk)
  353. mode--;
  354. nmin = num_clocks_min(udma_tmin[mode], fsclk);
  355. if (nmin >= 1) {
  356. /* calculate the timing values for Ultra DMA. */
  357. tdvs = num_clocks_min(udma_tdvsmin[mode], fsclk);
  358. tcyc = num_clocks_min(udma_tcycmin[mode], fsclk);
  359. tcyc_tdvs = 2;
/* increase tcyc - tdvs (tcyc_tdvs) until we meet
  361. * the minimum cycle length
  362. */
  363. if (tdvs + tcyc_tdvs < tcyc)
  364. tcyc_tdvs = tcyc - tdvs;
/* Now assign the values required for the timing
  366. * registers
  367. */
  368. if (tcyc_tdvs < 2)
  369. tcyc_tdvs = 2;
  370. if (tdvs < 2)
  371. tdvs = 2;
  372. tack = num_clocks_min(udma_tackmin, fsclk);
  373. tss = num_clocks_min(udma_tssmin, fsclk);
  374. tmli = num_clocks_min(udma_tmlimin, fsclk);
  375. tzah = num_clocks_min(udma_tzahmin, fsclk);
  376. trp = num_clocks_min(udma_trpmin[mode], fsclk);
  377. tenv = num_clocks_min(udma_tenvmin, fsclk);
  378. if (tenv <= udma_tenvmax[mode]) {
  379. ATAPI_SET_ULTRA_TIM_0(base, (tenv<<8 | tack));
  380. ATAPI_SET_ULTRA_TIM_1(base,
  381. (tcyc_tdvs<<8 | tdvs));
  382. ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
  383. ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));
/* Enable host ATAPI Ultra DMA interrupts */
  385. ATAPI_SET_INT_MASK(base,
  386. ATAPI_GET_INT_MASK(base)
  387. | UDMAIN_DONE_MASK
  388. | UDMAOUT_DONE_MASK
  389. | UDMAIN_TERM_MASK
  390. | UDMAOUT_TERM_MASK);
  391. }
  392. }
  393. }
  394. mode = adev->dma_mode - XFER_MW_DMA_0;
  395. if (mode >= 0 && mode <= 2) {
  396. dev_dbg(adev->link->ap->dev, "set mdmamode: mode=%d\n", mode);
  397. /* the most restrictive timing value is tf, the DMACK to
  398. * read data released. If one SCLK pulse is longer than
  399. * this maximum value then the MDMA mode
  400. * cannot be supported at this frequency.
  401. */
  402. while (mode > 0 && mdma_fsclk[mode] > fsclk)
  403. mode--;
  404. nf = num_clocks_min(tfmin, fsclk);
  405. if (nf >= 1) {
  406. /* calculate the timing values for Multi-word DMA. */
  407. /* DIOR/DIOW asserted pulse width */
  408. td = num_clocks_min(mdma_tdmin[mode], fsclk);
  409. /* DIOR negated pulse width */
  410. tkw = num_clocks_min(mdma_tkwmin[mode], fsclk);
  411. /* Cycle Time */
  412. n0 = num_clocks_min(mdma_t0min[mode], fsclk);
/* increase tkw until we meet the minimum cycle length */
  414. if (tkw + td < n0)
  415. tkw = n0 - td;
  416. /* DIOR negated pulse width - read */
  417. tkr = num_clocks_min(mdma_tkrmin[mode], fsclk);
/* CS[1:0] valid to DIOR/DIOW */
  419. tm = num_clocks_min(mdma_tmmin[mode], fsclk);
  420. /* DIOR/DIOW to DMACK hold */
  421. teoc = num_clocks_min(mdma_tjmin[mode], fsclk);
  422. /* DIOW Data hold */
  423. th = num_clocks_min(mdma_thmin[mode], fsclk);
  424. ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
  425. ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
  426. ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));
  427. /* Enable host ATAPI Multi DMA interrupts */
  428. ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
  429. | MULTI_DONE_MASK | MULTI_TERM_MASK);
  430. SSYNC();
  431. }
  432. }
  433. return;
  434. }
  435. /**
  436. *
  437. * Function: wait_complete
  438. *
* Description: Waits for the completion interrupt from the device
  440. *
  441. */
  442. static inline void wait_complete(void __iomem *base, unsigned short mask)
  443. {
  444. unsigned short status;
  445. unsigned int i = 0;
  446. #define PATA_BF54X_WAIT_TIMEOUT 10000
  447. for (i = 0; i < PATA_BF54X_WAIT_TIMEOUT; i++) {
  448. status = ATAPI_GET_INT_STATUS(base) & mask;
  449. if (status)
  450. break;
  451. }
  452. ATAPI_SET_INT_STATUS(base, mask);
  453. }
  454. /**
  455. *
  456. * Function: write_atapi_register
  457. *
* Description: Writes to ATA Device Register
  459. *
  460. */
  461. static void write_atapi_register(void __iomem *base,
  462. unsigned long ata_reg, unsigned short value)
  463. {
  464. /* Program the ATA_DEV_TXBUF register with write data (to be
  465. * written into the device).
  466. */
  467. ATAPI_SET_DEV_TXBUF(base, value);
  468. /* Program the ATA_DEV_ADDR register with address of the
  469. * device register (0x01 to 0x0F).
  470. */
  471. ATAPI_SET_DEV_ADDR(base, ata_reg);
  472. /* Program the ATA_CTRL register with dir set to write (1)
  473. */
  474. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
  475. /* ensure PIO DMA is not set */
  476. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
  477. /* and start the transfer */
  478. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
  479. /* Wait for the interrupt to indicate the end of the transfer.
* (We need to wait on and clear the ATA_DEV_INT interrupt status)
  481. */
  482. wait_complete(base, PIO_DONE_INT);
  483. }
  484. /**
  485. *
  486. * Function: read_atapi_register
  487. *
* Description: Reads from ATA Device Register
  489. *
  490. */
  491. static unsigned short read_atapi_register(void __iomem *base,
  492. unsigned long ata_reg)
  493. {
  494. /* Program the ATA_DEV_ADDR register with address of the
  495. * device register (0x01 to 0x0F).
  496. */
  497. ATAPI_SET_DEV_ADDR(base, ata_reg);
/* Program the ATA_CTRL register with dir set to read (0)
  499. */
  500. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
  501. /* ensure PIO DMA is not set */
  502. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
  503. /* and start the transfer */
  504. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
  505. /* Wait for the interrupt to indicate the end of the transfer.
  506. * (PIO_DONE interrupt is set and it doesn't seem to matter
  507. * that we don't clear it)
  508. */
  509. wait_complete(base, PIO_DONE_INT);
/* Read the ATA_DEV_RXBUF register to get the data
* received from the device.
  512. */
  513. return ATAPI_GET_DEV_RXBUF(base);
  514. }
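/*
 * Illustrative sketch, not part of the original driver: the two helpers
 * above implement the MMIO-mediated ATA register protocol (set address,
 * set direction, start, wait for PIO_DONE). A hypothetical busy-wait on
 * the Status register could be built on top of them like this; the real
 * driver instead goes through bfin_check_status() and libata's own wait
 * helpers.
 */
#if 0	/* example only, not compiled */
static int wait_not_busy_example(void __iomem *base, unsigned int tries)
{
	while (tries--) {
		if (!(read_atapi_register(base, ATA_REG_STATUS) & ATA_BUSY))
			return 0;
		udelay(10);
	}
	return -EBUSY;
}
#endif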
  515. /**
  516. *
* Function: write_atapi_data
*
* Description: Writes a block of data words to the ATA data register
  520. *
  521. */
  522. static void write_atapi_data(void __iomem *base,
  523. int len, unsigned short *buf)
  524. {
  525. int i;
  526. /* Set transfer length to 1 */
  527. ATAPI_SET_XFER_LEN(base, 1);
  528. /* Program the ATA_DEV_ADDR register with address of the
  529. * ATA_REG_DATA
  530. */
  531. ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
  532. /* Program the ATA_CTRL register with dir set to write (1)
  533. */
  534. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
  535. /* ensure PIO DMA is not set */
  536. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
  537. for (i = 0; i < len; i++) {
  538. /* Program the ATA_DEV_TXBUF register with write data (to be
  539. * written into the device).
  540. */
  541. ATAPI_SET_DEV_TXBUF(base, buf[i]);
  542. /* and start the transfer */
  543. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
  544. /* Wait for the interrupt to indicate the end of the transfer.
* (We need to wait on and clear the ATA_DEV_INT
  546. * interrupt status)
  547. */
  548. wait_complete(base, PIO_DONE_INT);
  549. }
  550. }
  551. /**
  552. *
* Function: read_atapi_data
*
* Description: Reads a block of data words from the ATA data register
  556. *
  557. */
  558. static void read_atapi_data(void __iomem *base,
  559. int len, unsigned short *buf)
  560. {
  561. int i;
  562. /* Set transfer length to 1 */
  563. ATAPI_SET_XFER_LEN(base, 1);
  564. /* Program the ATA_DEV_ADDR register with address of the
  565. * ATA_REG_DATA
  566. */
  567. ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
/* Program the ATA_CTRL register with dir set to read (0)
  569. */
  570. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
  571. /* ensure PIO DMA is not set */
  572. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
  573. for (i = 0; i < len; i++) {
  574. /* and start the transfer */
  575. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
  576. /* Wait for the interrupt to indicate the end of the transfer.
  577. * (PIO_DONE interrupt is set and it doesn't seem to matter
  578. * that we don't clear it)
  579. */
  580. wait_complete(base, PIO_DONE_INT);
/* Read the ATA_DEV_RXBUF register to get the data
* received from the device.
  583. */
  584. buf[i] = ATAPI_GET_DEV_RXBUF(base);
  585. }
  586. }
  587. /**
  588. * bfin_tf_load - send taskfile registers to host controller
  589. * @ap: Port to which output is sent
  590. * @tf: ATA taskfile register set
  591. *
  592. * Note: Original code is ata_sff_tf_load().
  593. */
  594. static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
  595. {
  596. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  597. unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
  598. if (tf->ctl != ap->last_ctl) {
  599. write_atapi_register(base, ATA_REG_CTRL, tf->ctl);
  600. ap->last_ctl = tf->ctl;
  601. ata_wait_idle(ap);
  602. }
  603. if (is_addr) {
  604. if (tf->flags & ATA_TFLAG_LBA48) {
  605. write_atapi_register(base, ATA_REG_FEATURE,
  606. tf->hob_feature);
  607. write_atapi_register(base, ATA_REG_NSECT,
  608. tf->hob_nsect);
  609. write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal);
  610. write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam);
  611. write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah);
  612. dev_dbg(ap->dev, "hob: feat 0x%X nsect 0x%X, lba 0x%X "
  613. "0x%X 0x%X\n",
  614. tf->hob_feature,
  615. tf->hob_nsect,
  616. tf->hob_lbal,
  617. tf->hob_lbam,
  618. tf->hob_lbah);
  619. }
  620. write_atapi_register(base, ATA_REG_FEATURE, tf->feature);
  621. write_atapi_register(base, ATA_REG_NSECT, tf->nsect);
  622. write_atapi_register(base, ATA_REG_LBAL, tf->lbal);
  623. write_atapi_register(base, ATA_REG_LBAM, tf->lbam);
  624. write_atapi_register(base, ATA_REG_LBAH, tf->lbah);
  625. dev_dbg(ap->dev, "feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
  626. tf->feature,
  627. tf->nsect,
  628. tf->lbal,
  629. tf->lbam,
  630. tf->lbah);
  631. }
  632. if (tf->flags & ATA_TFLAG_DEVICE) {
  633. write_atapi_register(base, ATA_REG_DEVICE, tf->device);
  634. dev_dbg(ap->dev, "device 0x%X\n", tf->device);
  635. }
  636. ata_wait_idle(ap);
  637. }
  638. /**
  639. * bfin_check_status - Read device status reg & clear interrupt
  640. * @ap: port where the device is
  641. *
  642. * Note: Original code is ata_check_status().
  643. */
  644. static u8 bfin_check_status(struct ata_port *ap)
  645. {
  646. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  647. return read_atapi_register(base, ATA_REG_STATUS);
  648. }
  649. /**
  650. * bfin_tf_read - input device's ATA taskfile shadow registers
  651. * @ap: Port from which input is read
  652. * @tf: ATA taskfile register set for storing input
  653. *
  654. * Note: Original code is ata_sff_tf_read().
  655. */
  656. static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
  657. {
  658. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  659. tf->command = bfin_check_status(ap);
  660. tf->feature = read_atapi_register(base, ATA_REG_ERR);
  661. tf->nsect = read_atapi_register(base, ATA_REG_NSECT);
  662. tf->lbal = read_atapi_register(base, ATA_REG_LBAL);
  663. tf->lbam = read_atapi_register(base, ATA_REG_LBAM);
  664. tf->lbah = read_atapi_register(base, ATA_REG_LBAH);
  665. tf->device = read_atapi_register(base, ATA_REG_DEVICE);
  666. if (tf->flags & ATA_TFLAG_LBA48) {
  667. write_atapi_register(base, ATA_REG_CTRL, tf->ctl | ATA_HOB);
  668. tf->hob_feature = read_atapi_register(base, ATA_REG_ERR);
  669. tf->hob_nsect = read_atapi_register(base, ATA_REG_NSECT);
  670. tf->hob_lbal = read_atapi_register(base, ATA_REG_LBAL);
  671. tf->hob_lbam = read_atapi_register(base, ATA_REG_LBAM);
  672. tf->hob_lbah = read_atapi_register(base, ATA_REG_LBAH);
  673. }
  674. }
  675. /**
  676. * bfin_exec_command - issue ATA command to host controller
  677. * @ap: port to which command is being issued
  678. * @tf: ATA taskfile register set
  679. *
  680. * Note: Original code is ata_sff_exec_command().
  681. */
  682. static void bfin_exec_command(struct ata_port *ap,
  683. const struct ata_taskfile *tf)
  684. {
  685. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  686. dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command);
  687. write_atapi_register(base, ATA_REG_CMD, tf->command);
  688. ata_sff_pause(ap);
  689. }
  690. /**
  691. * bfin_check_altstatus - Read device alternate status reg
  692. * @ap: port where the device is
  693. */
  694. static u8 bfin_check_altstatus(struct ata_port *ap)
  695. {
  696. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  697. return read_atapi_register(base, ATA_REG_ALTSTATUS);
  698. }
  699. /**
  700. * bfin_dev_select - Select device 0/1 on ATA bus
  701. * @ap: ATA channel to manipulate
  702. * @device: ATA device (numbered from zero) to select
  703. *
  704. * Note: Original code is ata_sff_dev_select().
  705. */
  706. static void bfin_dev_select(struct ata_port *ap, unsigned int device)
  707. {
  708. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  709. u8 tmp;
  710. if (device == 0)
  711. tmp = ATA_DEVICE_OBS;
  712. else
  713. tmp = ATA_DEVICE_OBS | ATA_DEV1;
  714. write_atapi_register(base, ATA_REG_DEVICE, tmp);
  715. ata_sff_pause(ap);
  716. }
  717. /**
  718. * bfin_bmdma_setup - Set up IDE DMA transaction
  719. * @qc: Info associated with this ATA transaction.
  720. *
  721. * Note: Original code is ata_bmdma_setup().
  722. */
  723. static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
  724. {
  725. unsigned short config = WDSIZE_16;
  726. struct scatterlist *sg;
  727. unsigned int si;
  728. dev_dbg(qc->ap->dev, "in atapi dma setup\n");
  729. /* Program the ATA_CTRL register with dir */
  730. if (qc->tf.flags & ATA_TFLAG_WRITE) {
  731. /* fill the ATAPI DMA controller */
  732. set_dma_config(CH_ATAPI_TX, config);
  733. set_dma_x_modify(CH_ATAPI_TX, 2);
  734. for_each_sg(qc->sg, sg, qc->n_elem, si) {
  735. set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
  736. set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
  737. }
  738. } else {
  739. config |= WNR;
  740. /* fill the ATAPI DMA controller */
  741. set_dma_config(CH_ATAPI_RX, config);
  742. set_dma_x_modify(CH_ATAPI_RX, 2);
  743. for_each_sg(qc->sg, sg, qc->n_elem, si) {
  744. set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
  745. set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
  746. }
  747. }
  748. }
  749. /**
  750. * bfin_bmdma_start - Start an IDE DMA transaction
  751. * @qc: Info associated with this ATA transaction.
  752. *
  753. * Note: Original code is ata_bmdma_start().
  754. */
  755. static void bfin_bmdma_start(struct ata_queued_cmd *qc)
  756. {
  757. struct ata_port *ap = qc->ap;
  758. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  759. struct scatterlist *sg;
  760. unsigned int si;
  761. dev_dbg(qc->ap->dev, "in atapi dma start\n");
  762. if (!(ap->udma_mask || ap->mwdma_mask))
  763. return;
  764. /* start ATAPI DMA controller*/
  765. if (qc->tf.flags & ATA_TFLAG_WRITE) {
  766. /*
  767. * On blackfin arch, uncacheable memory is not
  768. * allocated with flag GFP_DMA. DMA buffer from
* common kernel code should be flushed if WB
  770. * data cache is enabled. Otherwise, this loop
  771. * is an empty loop and optimized out.
  772. */
  773. for_each_sg(qc->sg, sg, qc->n_elem, si) {
  774. flush_dcache_range(sg_dma_address(sg),
  775. sg_dma_address(sg) + sg_dma_len(sg));
  776. }
  777. enable_dma(CH_ATAPI_TX);
  778. dev_dbg(qc->ap->dev, "enable udma write\n");
  779. /* Send ATA DMA write command */
  780. bfin_exec_command(ap, &qc->tf);
  781. /* set ATA DMA write direction */
  782. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
  783. | XFER_DIR));
  784. } else {
  785. enable_dma(CH_ATAPI_RX);
  786. dev_dbg(qc->ap->dev, "enable udma read\n");
  787. /* Send ATA DMA read command */
  788. bfin_exec_command(ap, &qc->tf);
  789. /* set ATA DMA read direction */
  790. ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
  791. & ~XFER_DIR));
  792. }
  793. /* Reset all transfer count */
  794. ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
/* Set ATAPI state machine control in terminate sequence */
  796. ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
  797. /* Set transfer length to buffer len */
  798. for_each_sg(qc->sg, sg, qc->n_elem, si) {
  799. ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
  800. }
  801. /* Enable ATA DMA operation*/
  802. if (ap->udma_mask)
  803. ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
  804. | ULTRA_START);
  805. else
  806. ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
  807. | MULTI_START);
  808. }
  809. /**
  810. * bfin_bmdma_stop - Stop IDE DMA transfer
  811. * @qc: Command we are ending DMA for
  812. */
  813. static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
  814. {
  815. struct ata_port *ap = qc->ap;
  816. struct scatterlist *sg;
  817. unsigned int si;
  818. dev_dbg(qc->ap->dev, "in atapi dma stop\n");
  819. if (!(ap->udma_mask || ap->mwdma_mask))
  820. return;
  821. /* stop ATAPI DMA controller*/
  822. if (qc->tf.flags & ATA_TFLAG_WRITE)
  823. disable_dma(CH_ATAPI_TX);
  824. else {
  825. disable_dma(CH_ATAPI_RX);
  826. if (ap->hsm_task_state & HSM_ST_LAST) {
  827. /*
  828. * On blackfin arch, uncacheable memory is not
  829. * allocated with flag GFP_DMA. DMA buffer from
* common kernel code should be invalidated if
  831. * data cache is enabled. Otherwise, this loop
  832. * is an empty loop and optimized out.
  833. */
  834. for_each_sg(qc->sg, sg, qc->n_elem, si) {
  835. invalidate_dcache_range(
  836. sg_dma_address(sg),
  837. sg_dma_address(sg)
  838. + sg_dma_len(sg));
  839. }
  840. }
  841. }
  842. }
  843. /**
  844. * bfin_devchk - PATA device presence detection
  845. * @ap: ATA channel to examine
  846. * @device: Device to examine (starting at zero)
  847. *
  848. * Note: Original code is ata_devchk().
  849. */
  850. static unsigned int bfin_devchk(struct ata_port *ap,
  851. unsigned int device)
  852. {
  853. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  854. u8 nsect, lbal;
  855. bfin_dev_select(ap, device);
  856. write_atapi_register(base, ATA_REG_NSECT, 0x55);
  857. write_atapi_register(base, ATA_REG_LBAL, 0xaa);
  858. write_atapi_register(base, ATA_REG_NSECT, 0xaa);
  859. write_atapi_register(base, ATA_REG_LBAL, 0x55);
  860. write_atapi_register(base, ATA_REG_NSECT, 0x55);
  861. write_atapi_register(base, ATA_REG_LBAL, 0xaa);
  862. nsect = read_atapi_register(base, ATA_REG_NSECT);
  863. lbal = read_atapi_register(base, ATA_REG_LBAL);
  864. if ((nsect == 0x55) && (lbal == 0xaa))
  865. return 1; /* we found a device */
  866. return 0; /* nothing found */
  867. }
  868. /**
  869. * bfin_bus_post_reset - PATA device post reset
  870. *
  871. * Note: Original code is ata_bus_post_reset().
  872. */
  873. static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
  874. {
  875. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  876. unsigned int dev0 = devmask & (1 << 0);
  877. unsigned int dev1 = devmask & (1 << 1);
  878. unsigned long deadline;
  879. /* if device 0 was found in ata_devchk, wait for its
  880. * BSY bit to clear
  881. */
  882. if (dev0)
  883. ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
  884. /* if device 1 was found in ata_devchk, wait for
  885. * register access, then wait for BSY to clear
  886. */
  887. deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
  888. while (dev1) {
  889. u8 nsect, lbal;
  890. bfin_dev_select(ap, 1);
  891. nsect = read_atapi_register(base, ATA_REG_NSECT);
  892. lbal = read_atapi_register(base, ATA_REG_LBAL);
  893. if ((nsect == 1) && (lbal == 1))
  894. break;
  895. if (time_after(jiffies, deadline)) {
  896. dev1 = 0;
  897. break;
  898. }
  899. msleep(50); /* give drive a breather */
  900. }
  901. if (dev1)
  902. ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
  903. /* is all this really necessary? */
  904. bfin_dev_select(ap, 0);
  905. if (dev1)
  906. bfin_dev_select(ap, 1);
  907. if (dev0)
  908. bfin_dev_select(ap, 0);
  909. }
  910. /**
  911. * bfin_bus_softreset - PATA device software reset
  912. *
  913. * Note: Original code is ata_bus_softreset().
  914. */
  915. static unsigned int bfin_bus_softreset(struct ata_port *ap,
  916. unsigned int devmask)
  917. {
  918. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  919. /* software reset. causes dev0 to be selected */
  920. write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
  921. udelay(20);
  922. write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST);
  923. udelay(20);
  924. write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
  925. /* spec mandates ">= 2ms" before checking status.
  926. * We wait 150ms, because that was the magic delay used for
  927. * ATAPI devices in Hale Landis's ATADRVR, for the period of time
  928. * between when the ATA command register is written, and then
  929. * status is checked. Because waiting for "a while" before
  930. * checking status is fine, post SRST, we perform this magic
  931. * delay here as well.
  932. *
  933. * Old drivers/ide uses the 2mS rule and then waits for ready
  934. */
  935. msleep(150);
  936. /* Before we perform post reset processing we want to see if
  937. * the bus shows 0xFF because the odd clown forgets the D7
  938. * pulldown resistor.
  939. */
  940. if (bfin_check_status(ap) == 0xFF)
  941. return 0;
  942. bfin_bus_post_reset(ap, devmask);
  943. return 0;
  944. }
  945. /**
  946. * bfin_softreset - reset host port via ATA SRST
* @link: ATA link to reset
  948. * @classes: resulting classes of attached devices
  949. *
  950. * Note: Original code is ata_sff_softreset().
  951. */
  952. static int bfin_softreset(struct ata_link *link, unsigned int *classes,
  953. unsigned long deadline)
  954. {
  955. struct ata_port *ap = link->ap;
  956. unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
  957. unsigned int devmask = 0, err_mask;
  958. u8 err;
  959. /* determine if device 0/1 are present */
  960. if (bfin_devchk(ap, 0))
  961. devmask |= (1 << 0);
  962. if (slave_possible && bfin_devchk(ap, 1))
  963. devmask |= (1 << 1);
  964. /* select device 0 again */
  965. bfin_dev_select(ap, 0);
  966. /* issue bus reset */
  967. err_mask = bfin_bus_softreset(ap, devmask);
  968. if (err_mask) {
  969. ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
  970. err_mask);
  971. return -EIO;
  972. }
  973. /* determine by signature whether we have ATA or ATAPI devices */
  974. classes[0] = ata_sff_dev_classify(&ap->link.device[0],
  975. devmask & (1 << 0), &err);
  976. if (slave_possible && err != 0x81)
  977. classes[1] = ata_sff_dev_classify(&ap->link.device[1],
  978. devmask & (1 << 1), &err);
  979. return 0;
  980. }
  981. /**
  982. * bfin_bmdma_status - Read IDE DMA status
  983. * @ap: Port associated with this ATA transaction.
  984. */
  985. static unsigned char bfin_bmdma_status(struct ata_port *ap)
  986. {
  987. unsigned char host_stat = 0;
  988. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  989. unsigned short int_status = ATAPI_GET_INT_STATUS(base);
  990. if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON))
  991. host_stat |= ATA_DMA_ACTIVE;
  992. if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT|
  993. ATAPI_DEV_INT))
  994. host_stat |= ATA_DMA_INTR;
  995. if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT))
  996. host_stat |= ATA_DMA_ERR|ATA_DMA_INTR;
  997. dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);
  998. return host_stat;
  999. }
  1000. /**
  1001. * bfin_data_xfer - Transfer data by PIO
* @dev: device for this I/O
  1003. * @buf: data buffer
  1004. * @buflen: buffer length
* @rw: read/write
  1006. *
  1007. * Note: Original code is ata_sff_data_xfer().
  1008. */
  1009. static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
  1010. unsigned int buflen, int rw)
  1011. {
  1012. struct ata_port *ap = dev->link->ap;
  1013. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  1014. unsigned int words = buflen >> 1;
  1015. unsigned short *buf16 = (u16 *)buf;
  1016. /* Transfer multiple of 2 bytes */
  1017. if (rw == READ)
  1018. read_atapi_data(base, words, buf16);
  1019. else
  1020. write_atapi_data(base, words, buf16);
  1021. /* Transfer trailing 1 byte, if any. */
  1022. if (unlikely(buflen & 0x01)) {
  1023. unsigned short align_buf[1] = { 0 };
  1024. unsigned char *trailing_buf = buf + buflen - 1;
  1025. if (rw == READ) {
  1026. read_atapi_data(base, 1, align_buf);
  1027. memcpy(trailing_buf, align_buf, 1);
  1028. } else {
  1029. memcpy(align_buf, trailing_buf, 1);
  1030. write_atapi_data(base, 1, align_buf);
  1031. }
  1032. words++;
  1033. }
  1034. return words << 1;
  1035. }
  1036. /**
  1037. * bfin_irq_clear - Clear ATAPI interrupt.
  1038. * @ap: Port associated with this ATA transaction.
  1039. *
  1040. * Note: Original code is ata_sff_irq_clear().
  1041. */
  1042. static void bfin_irq_clear(struct ata_port *ap)
  1043. {
  1044. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  1045. dev_dbg(ap->dev, "in atapi irq clear\n");
  1046. ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT
  1047. | MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT
  1048. | MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);
  1049. }
  1050. /**
  1051. * bfin_irq_on - Enable interrupts on a port.
  1052. * @ap: Port on which interrupts are enabled.
  1053. *
  1054. * Note: Original code is ata_sff_irq_on().
  1055. */
  1056. static unsigned char bfin_irq_on(struct ata_port *ap)
  1057. {
  1058. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  1059. u8 tmp;
  1060. dev_dbg(ap->dev, "in atapi irq on\n");
  1061. ap->ctl &= ~ATA_NIEN;
  1062. ap->last_ctl = ap->ctl;
  1063. write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
  1064. tmp = ata_wait_idle(ap);
  1065. bfin_irq_clear(ap);
  1066. return tmp;
  1067. }
  1068. /**
  1069. * bfin_freeze - Freeze DMA controller port
  1070. * @ap: port to freeze
  1071. *
  1072. * Note: Original code is ata_sff_freeze().
  1073. */
  1074. static void bfin_freeze(struct ata_port *ap)
  1075. {
  1076. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  1077. dev_dbg(ap->dev, "in atapi dma freeze\n");
  1078. ap->ctl |= ATA_NIEN;
  1079. ap->last_ctl = ap->ctl;
  1080. write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
  1081. /* Under certain circumstances, some controllers raise IRQ on
  1082. * ATA_NIEN manipulation. Also, many controllers fail to mask
  1083. * previously pending IRQ on ATA_NIEN assertion. Clear it.
  1084. */
  1085. ap->ops->sff_check_status(ap);
  1086. bfin_irq_clear(ap);
  1087. }
  1088. /**
  1089. * bfin_thaw - Thaw DMA controller port
  1090. * @ap: port to thaw
  1091. *
  1092. * Note: Original code is ata_sff_thaw().
  1093. */
  1094. void bfin_thaw(struct ata_port *ap)
  1095. {
  1096. dev_dbg(ap->dev, "in atapi dma thaw\n");
  1097. bfin_check_status(ap);
  1098. bfin_irq_on(ap);
  1099. }
  1100. /**
  1101. * bfin_postreset - standard postreset callback
* @link: the target ata_link
  1103. * @classes: classes of attached devices
  1104. *
  1105. * Note: Original code is ata_sff_postreset().
  1106. */
  1107. static void bfin_postreset(struct ata_link *link, unsigned int *classes)
  1108. {
  1109. struct ata_port *ap = link->ap;
  1110. void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
  1111. /* re-enable interrupts */
  1112. bfin_irq_on(ap);
  1113. /* is double-select really necessary? */
  1114. if (classes[0] != ATA_DEV_NONE)
  1115. bfin_dev_select(ap, 1);
  1116. if (classes[1] != ATA_DEV_NONE)
  1117. bfin_dev_select(ap, 0);
  1118. /* bail out if no device is present */
  1119. if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
  1120. return;
  1121. }
  1122. /* set up device control */
  1123. write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
  1124. }
  1125. static void bfin_port_stop(struct ata_port *ap)
  1126. {
  1127. dev_dbg(ap->dev, "in atapi port stop\n");
  1128. if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
  1129. free_dma(CH_ATAPI_RX);
  1130. free_dma(CH_ATAPI_TX);
  1131. }
  1132. }
  1133. static int bfin_port_start(struct ata_port *ap)
  1134. {
  1135. dev_dbg(ap->dev, "in atapi port start\n");
  1136. if (!(ap->udma_mask || ap->mwdma_mask))
  1137. return 0;
  1138. if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
  1139. if (request_dma(CH_ATAPI_TX,
  1140. "BFIN ATAPI TX DMA") >= 0)
  1141. return 0;
  1142. free_dma(CH_ATAPI_RX);
  1143. }
  1144. ap->udma_mask = 0;
  1145. ap->mwdma_mask = 0;
  1146. dev_err(ap->dev, "Unable to request ATAPI DMA!"
  1147. " Continue in PIO mode.\n");
  1148. return 0;
  1149. }
  1150. static unsigned int bfin_ata_host_intr(struct ata_port *ap,
  1151. struct ata_queued_cmd *qc)
  1152. {
  1153. struct ata_eh_info *ehi = &ap->link.eh_info;
  1154. u8 status, host_stat = 0;
  1155. VPRINTK("ata%u: protocol %d task_state %d\n",
  1156. ap->print_id, qc->tf.protocol, ap->hsm_task_state);
  1157. /* Check whether we are expecting interrupt in this state */
  1158. switch (ap->hsm_task_state) {
  1159. case HSM_ST_FIRST:
  1160. /* Some pre-ATAPI-4 devices assert INTRQ
  1161. * at this state when ready to receive CDB.
  1162. */
  1163. /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
  1164. * The flag was turned on only for atapi devices.
  1165. * No need to check is_atapi_taskfile(&qc->tf) again.
  1166. */
  1167. if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
  1168. goto idle_irq;
  1169. break;
  1170. case HSM_ST_LAST:
  1171. if (qc->tf.protocol == ATA_PROT_DMA ||
  1172. qc->tf.protocol == ATAPI_PROT_DMA) {
  1173. /* check status of DMA engine */
  1174. host_stat = ap->ops->bmdma_status(ap);
  1175. VPRINTK("ata%u: host_stat 0x%X\n",
  1176. ap->print_id, host_stat);
  1177. /* if it's not our irq... */
  1178. if (!(host_stat & ATA_DMA_INTR))
  1179. goto idle_irq;
  1180. /* before we do anything else, clear DMA-Start bit */
  1181. ap->ops->bmdma_stop(qc);
  1182. if (unlikely(host_stat & ATA_DMA_ERR)) {
/* error when transferring data to/from memory */
  1184. qc->err_mask |= AC_ERR_HOST_BUS;
  1185. ap->hsm_task_state = HSM_ST_ERR;
  1186. }
  1187. }
  1188. break;
  1189. case HSM_ST:
  1190. break;
  1191. default:
  1192. goto idle_irq;
  1193. }
  1194. /* check altstatus */
  1195. status = ap->ops->sff_check_altstatus(ap);
  1196. if (status & ATA_BUSY)
  1197. goto busy_ata;
  1198. /* check main status, clearing INTRQ */
  1199. status = ap->ops->sff_check_status(ap);
  1200. if (unlikely(status & ATA_BUSY))
  1201. goto busy_ata;
  1202. /* ack bmdma irq events */
  1203. ap->ops->sff_irq_clear(ap);
  1204. ata_sff_hsm_move(ap, qc, status, 0);
  1205. if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
  1206. qc->tf.protocol == ATAPI_PROT_DMA))
  1207. ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
  1208. busy_ata:
  1209. return 1; /* irq handled */
  1210. idle_irq:
  1211. ap->stats.idle_irq++;
  1212. #ifdef ATA_IRQ_TRAP
  1213. if ((ap->stats.idle_irq % 1000) == 0) {
  1214. ap->ops->irq_ack(ap, 0); /* debug trap */
  1215. ata_port_printk(ap, KERN_WARNING, "irq trap\n");
  1216. return 1;
  1217. }
  1218. #endif
  1219. return 0; /* irq not handled */
  1220. }
  1221. static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
  1222. {
  1223. struct ata_host *host = dev_instance;
  1224. unsigned int i;
  1225. unsigned int handled = 0;
  1226. unsigned long flags;
  1227. /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
  1228. spin_lock_irqsave(&host->lock, flags);
  1229. for (i = 0; i < host->n_ports; i++) {
  1230. struct ata_port *ap;
  1231. ap = host->ports[i];
  1232. if (ap &&
  1233. !(ap->flags & ATA_FLAG_DISABLED)) {
  1234. struct ata_queued_cmd *qc;
  1235. qc = ata_qc_from_tag(ap, ap->link.active_tag);
  1236. if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
  1237. (qc->flags & ATA_QCFLAG_ACTIVE))
  1238. handled |= bfin_ata_host_intr(ap, qc);
  1239. }
  1240. }
  1241. spin_unlock_irqrestore(&host->lock, flags);
  1242. return IRQ_RETVAL(handled);
  1243. }
  1244. static struct scsi_host_template bfin_sht = {
  1245. ATA_BASE_SHT(DRV_NAME),
  1246. .sg_tablesize = SG_NONE,
  1247. .dma_boundary = ATA_DMA_BOUNDARY,
  1248. };
  1249. static struct ata_port_operations bfin_pata_ops = {
  1250. .inherits = &ata_sff_port_ops,
  1251. .set_piomode = bfin_set_piomode,
  1252. .set_dmamode = bfin_set_dmamode,
  1253. .sff_tf_load = bfin_tf_load,
  1254. .sff_tf_read = bfin_tf_read,
  1255. .sff_exec_command = bfin_exec_command,
  1256. .sff_check_status = bfin_check_status,
  1257. .sff_check_altstatus = bfin_check_altstatus,
  1258. .sff_dev_select = bfin_dev_select,
  1259. .bmdma_setup = bfin_bmdma_setup,
  1260. .bmdma_start = bfin_bmdma_start,
  1261. .bmdma_stop = bfin_bmdma_stop,
  1262. .bmdma_status = bfin_bmdma_status,
  1263. .sff_data_xfer = bfin_data_xfer,
  1264. .qc_prep = ata_noop_qc_prep,
  1265. .freeze = bfin_freeze,
  1266. .thaw = bfin_thaw,
  1267. .softreset = bfin_softreset,
  1268. .postreset = bfin_postreset,
  1269. .sff_irq_clear = bfin_irq_clear,
  1270. .sff_irq_on = bfin_irq_on,
  1271. .port_start = bfin_port_start,
  1272. .port_stop = bfin_port_stop,
  1273. };
  1274. static struct ata_port_info bfin_port_info[] = {
  1275. {
  1276. .flags = ATA_FLAG_SLAVE_POSS
  1277. | ATA_FLAG_MMIO
  1278. | ATA_FLAG_NO_LEGACY,
  1279. .pio_mask = 0x1f, /* pio0-4 */
  1280. .mwdma_mask = 0,
  1281. .udma_mask = 0,
  1282. .port_ops = &bfin_pata_ops,
  1283. },
  1284. };
  1285. /**
  1286. * bfin_reset_controller - initialize BF54x ATAPI controller.
  1287. */
  1288. static int bfin_reset_controller(struct ata_host *host)
  1289. {
  1290. void __iomem *base = (void __iomem *)host->ports[0]->ioaddr.ctl_addr;
  1291. int count;
  1292. unsigned short status;
  1293. /* Disable all ATAPI interrupts */
  1294. ATAPI_SET_INT_MASK(base, 0);
  1295. SSYNC();
  1296. /* Assert the RESET signal 25us*/
  1297. ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | DEV_RST);
  1298. udelay(30);
  1299. /* Negate the RESET signal for 2ms*/
  1300. ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) & ~DEV_RST);
  1301. msleep(2);
  1302. /* Wait on Busy flag to clear */
  1303. count = 10000000;
  1304. do {
  1305. status = read_atapi_register(base, ATA_REG_STATUS);
  1306. } while (--count && (status & ATA_BUSY));
  1307. /* Enable only ATAPI Device interrupt */
  1308. ATAPI_SET_INT_MASK(base, 1);
  1309. SSYNC();
  1310. return (!count);
  1311. }
  1312. /**
  1313. * atapi_io_port - define atapi peripheral port pins.
  1314. */
  1315. static unsigned short atapi_io_port[] = {
  1316. P_ATAPI_RESET,
  1317. P_ATAPI_DIOR,
  1318. P_ATAPI_DIOW,
  1319. P_ATAPI_CS0,
  1320. P_ATAPI_CS1,
  1321. P_ATAPI_DMACK,
  1322. P_ATAPI_DMARQ,
  1323. P_ATAPI_INTRQ,
  1324. P_ATAPI_IORDY,
  1325. 0
  1326. };
  1327. /**
  1328. * bfin_atapi_probe - attach a bfin atapi interface
  1329. * @pdev: platform device
  1330. *
  1331. * Register a bfin atapi interface.
  1332. *
  1333. *
  1334. * Platform devices are expected to contain 2 resources per port:
  1335. *
  1336. * - I/O Base (IORESOURCE_IO)
  1337. * - IRQ (IORESOURCE_IRQ)
  1338. *
  1339. */
  1340. static int __devinit bfin_atapi_probe(struct platform_device *pdev)
  1341. {
  1342. int board_idx = 0;
  1343. struct resource *res;
  1344. struct ata_host *host;
  1345. unsigned int fsclk = get_sclk();
  1346. int udma_mode = 5;
  1347. const struct ata_port_info *ppi[] =
  1348. { &bfin_port_info[board_idx], NULL };
  1349. /*
  1350. * Simple resource validation ..
  1351. */
  1352. if (unlikely(pdev->num_resources != 2)) {
  1353. dev_err(&pdev->dev, "invalid number of resources\n");
  1354. return -EINVAL;
  1355. }
  1356. /*
  1357. * Get the register base first
  1358. */
  1359. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1360. if (res == NULL)
  1361. return -EINVAL;
  1362. while (bfin_port_info[board_idx].udma_mask > 0 &&
  1363. udma_fsclk[udma_mode] > fsclk) {
  1364. udma_mode--;
  1365. bfin_port_info[board_idx].udma_mask >>= 1;
  1366. }
  1367. /*
  1368. * Now that that's out of the way, wire up the port..
  1369. */
  1370. host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
  1371. if (!host)
  1372. return -ENOMEM;
  1373. host->ports[0]->ioaddr.ctl_addr = (void *)res->start;
  1374. if (peripheral_request_list(atapi_io_port, "atapi-io-port")) {
dev_err(&pdev->dev, "Requesting peripherals failed\n");
  1376. return -EFAULT;
  1377. }
  1378. if (bfin_reset_controller(host)) {
  1379. peripheral_free_list(atapi_io_port);
dev_err(&pdev->dev, "Failed to reset ATAPI device\n");
  1381. return -EFAULT;
  1382. }
  1383. if (ata_host_activate(host, platform_get_irq(pdev, 0),
  1384. bfin_ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
  1385. peripheral_free_list(atapi_io_port);
dev_err(&pdev->dev, "Failed to attach ATAPI device\n");
  1387. return -ENODEV;
  1388. }
  1389. dev_set_drvdata(&pdev->dev, host);
  1390. return 0;
  1391. }
  1392. /**
  1393. * bfin_atapi_remove - unplug a bfin atapi interface
  1394. * @pdev: platform device
  1395. *
  1396. * A bfin atapi device has been unplugged. Perform the needed
  1397. * cleanup. Also called on module unload for any active devices.
  1398. */
  1399. static int __devexit bfin_atapi_remove(struct platform_device *pdev)
  1400. {
  1401. struct device *dev = &pdev->dev;
  1402. struct ata_host *host = dev_get_drvdata(dev);
  1403. ata_host_detach(host);
  1404. dev_set_drvdata(&pdev->dev, NULL);
  1405. peripheral_free_list(atapi_io_port);
  1406. return 0;
  1407. }
  1408. #ifdef CONFIG_PM
  1409. static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
  1410. {
  1411. struct ata_host *host = dev_get_drvdata(&pdev->dev);
  1412. if (host)
  1413. return ata_host_suspend(host, state);
  1414. else
  1415. return 0;
  1416. }
  1417. static int bfin_atapi_resume(struct platform_device *pdev)
  1418. {
  1419. struct ata_host *host = dev_get_drvdata(&pdev->dev);
  1420. int ret;
  1421. if (host) {
  1422. ret = bfin_reset_controller(host);
  1423. if (ret) {
  1424. printk(KERN_ERR DRV_NAME ": Error during HW init\n");
  1425. return ret;
  1426. }
  1427. ata_host_resume(host);
  1428. }
  1429. return 0;
  1430. }
  1431. #else
  1432. #define bfin_atapi_suspend NULL
  1433. #define bfin_atapi_resume NULL
  1434. #endif
  1435. static struct platform_driver bfin_atapi_driver = {
  1436. .probe = bfin_atapi_probe,
  1437. .remove = __devexit_p(bfin_atapi_remove),
  1438. .suspend = bfin_atapi_suspend,
  1439. .resume = bfin_atapi_resume,
  1440. .driver = {
  1441. .name = DRV_NAME,
  1442. .owner = THIS_MODULE,
  1443. },
  1444. };
  1445. #define ATAPI_MODE_SIZE 10
  1446. static char bfin_atapi_mode[ATAPI_MODE_SIZE];
  1447. static int __init bfin_atapi_init(void)
  1448. {
  1449. pr_info("register bfin atapi driver\n");
  1450. switch(bfin_atapi_mode[0]) {
  1451. case 'p':
  1452. case 'P':
  1453. break;
  1454. case 'm':
  1455. case 'M':
  1456. bfin_port_info[0].mwdma_mask = ATA_MWDMA2;
  1457. break;
  1458. default:
  1459. bfin_port_info[0].udma_mask = ATA_UDMA5;
}
  1461. return platform_driver_register(&bfin_atapi_driver);
  1462. }
  1463. static void __exit bfin_atapi_exit(void)
  1464. {
  1465. platform_driver_unregister(&bfin_atapi_driver);
  1466. }
  1467. module_init(bfin_atapi_init);
  1468. module_exit(bfin_atapi_exit);
  1469. /*
  1470. * ATAPI mode:
  1471. * pio/PIO
  1472. * udma/UDMA (default)
  1473. * mwdma/MWDMA
  1474. */
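/*
 * Illustrative usage, assumed rather than taken from this file: the mode
 * string is matched only on its first character in bfin_atapi_init(), so
 * (assuming the module builds as pata_bf54x) a boot argument such as
 *   pata_bf54x.bfin_atapi_mode=pio
 * or, for a loadable module,
 *   modprobe pata_bf54x bfin_atapi_mode=mwdma
 * selects PIO-only or MWDMA operation; any other value keeps the UDMA default.
 */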
  1475. module_param_string(bfin_atapi_mode, bfin_atapi_mode, ATAPI_MODE_SIZE, 0);
  1476. MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
  1477. MODULE_DESCRIPTION("PATA driver for blackfin 54x ATAPI controller");
  1478. MODULE_LICENSE("GPL");
  1479. MODULE_VERSION(DRV_VERSION);
  1480. MODULE_ALIAS("platform:" DRV_NAME);