au1xxx-ide.c

/*
 * linux/drivers/ide/mips/au1xxx-ide.c  version 01.30.00  Aug. 02 2005
 *
 * BRIEF MODULE DESCRIPTION
 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
 *
 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
 * IDE Interface and Linux Device Driver" Application Note.
 */
#undef REALLY_SLOW_IO           /* most systems can safely undef this */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/sysdev.h>
#include <linux/dma-mapping.h>

#include "ide-timing.h"

#include <asm/io.h>
#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_ide.h>

#define DRV_NAME        "au1200-ide"
#define DRV_VERSION     "1.0"
#define DRV_AUTHOR      "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"

/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE    1
static _auide_hwif auide_hwif;
static int dbdma_init_done;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
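/*
 * DBDMA-assisted replacements for the generic insw()/outsw() PIO helpers:
 * the data phase is queued on the RX/TX DBDMA channel and the CPU then
 * busy-waits until the current descriptor's valid bit is cleared.
 */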
void auide_insw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_dest_flags(ahwif->rx_chan, (void *)addr, count << 1,
                            DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->rx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

void auide_outsw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_source_flags(ahwif->tx_chan, (void *)addr,
                              count << 1, DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->tx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}
#endif
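/*
 * Program the static bus controller (MEM_STTIME2/MEM_STCFG2) for the
 * requested PIO mode and report the new speed to the IDE core.
 */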
static void auide_tune_drive(ide_drive_t *drive, byte pio)
{
        int mem_sttime;
        int mem_stcfg;
        u8 speed;

        /* get the best pio mode for the drive */
        pio = ide_get_best_pio_mode(drive, pio, 4, NULL);

        printk(KERN_INFO "%s: setting Au1XXX IDE to PIO mode%d\n",
               drive->name, pio);

        mem_sttime = 0;
        mem_stcfg = au_readl(MEM_STCFG2);

        /* set pio mode! */
        switch (pio) {
        case 0:
                mem_sttime = SBC_IDE_TIMING(PIO0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
                break;
        case 1:
                mem_sttime = SBC_IDE_TIMING(PIO1);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
                break;
        case 2:
                mem_sttime = SBC_IDE_TIMING(PIO2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
                break;
        case 3:
                mem_sttime = SBC_IDE_TIMING(PIO3);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
                break;
        case 4:
                mem_sttime = SBC_IDE_TIMING(PIO4);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
                break;
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);

        speed = pio + XFER_PIO_0;
        ide_config_drive_speed(drive, speed);
}
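/*
 * Set a PIO or Multi-Word DMA transfer mode.  PIO modes are delegated to
 * auide_tune_drive(); MDMA0-2 timings are written to MEM_STTIME2/MEM_STCFG2
 * once the drive has accepted the new speed.
 */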
static int auide_tune_chipset(ide_drive_t *drive, u8 speed)
{
        int mem_sttime;
        int mem_stcfg;

        mem_sttime = 0;
        mem_stcfg = au_readl(MEM_STCFG2);

        if (speed >= XFER_PIO_0 && speed <= XFER_PIO_4) {
                auide_tune_drive(drive, speed - XFER_PIO_0);
                return 0;
        }

        switch (speed) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        case XFER_MW_DMA_2:
                mem_sttime = SBC_IDE_TIMING(MDMA2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
                break;
        case XFER_MW_DMA_1:
                mem_sttime = SBC_IDE_TIMING(MDMA1);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
                break;
        case XFER_MW_DMA_0:
                mem_sttime = SBC_IDE_TIMING(MDMA0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
                break;
#endif
        default:
                return 1;
        }

        if (ide_config_drive_speed(drive, speed))
                return 1;

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);

        return 0;
}
/*
 * Multi-Word DMA + DbDMA functions
 */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA

static int auide_build_sglist(ide_drive_t *drive, struct request *rq)
{
        ide_hwif_t *hwif = drive->hwif;
        _auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
        struct scatterlist *sg = hwif->sg_table;

        ide_map_sg(drive, rq);

        if (rq_data_dir(rq) == READ)
                hwif->sg_dma_direction = DMA_FROM_DEVICE;
        else
                hwif->sg_dma_direction = DMA_TO_DEVICE;

        return dma_map_sg(ahwif->dev, sg, hwif->sg_nents,
                          hwif->sg_dma_direction);
}
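/*
 * Walk the scatterlist and queue one DBDMA descriptor per chunk (at most
 * 0xfe00 bytes each).  Only the final descriptor raises an interrupt.
 * Returns 1 on success, 0 to make the caller fall back to PIO.
 */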
static int auide_build_dmatable(ide_drive_t *drive)
{
        int i, iswrite, count = 0;
        ide_hwif_t *hwif = HWIF(drive);
        struct request *rq = HWGROUP(drive)->rq;
        _auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
        struct scatterlist *sg;

        iswrite = (rq_data_dir(rq) == WRITE);
        /* Save for interrupt context */
        ahwif->drive = drive;

        /* Build sglist */
        hwif->sg_nents = i = auide_build_sglist(drive, rq);

        if (!i)
                return 0;

        /* fill the descriptors */
        sg = hwif->sg_table;
        while (i && sg_dma_len(sg)) {
                u32 cur_addr;
                u32 cur_len;

                cur_addr = sg_dma_address(sg);
                cur_len = sg_dma_len(sg);

                while (cur_len) {
                        u32 flags = DDMA_FLAGS_NOIE;
                        unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

                        if (++count >= PRD_ENTRIES) {
                                printk(KERN_WARNING "%s: DMA table too small\n",
                                       drive->name);
                                goto use_pio_instead;
                        }

                        /* Let's enable intr for the last descriptor only */
                        if (i == 1)
                                flags = DDMA_FLAGS_IE;
                        else
                                flags = DDMA_FLAGS_NOIE;

                        if (iswrite) {
                                if (!put_source_flags(ahwif->tx_chan,
                                                      (void *)(page_address(sg->page)
                                                               + sg->offset),
                                                      tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __FUNCTION__, __LINE__);
                                }
                        } else {
                                if (!put_dest_flags(ahwif->rx_chan,
                                                    (void *)(page_address(sg->page)
                                                             + sg->offset),
                                                    tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __FUNCTION__, __LINE__);
                                }
                        }

                        cur_addr += tc;
                        cur_len -= tc;
                }
                sg++;
                i--;
        }

        if (count)
                return 1;

use_pio_instead:
        dma_unmap_sg(ahwif->dev,
                     hwif->sg_table,
                     hwif->sg_nents,
                     hwif->sg_dma_direction);

        return 0; /* revert to PIO for this request */
}
static int auide_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        _auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;

        if (hwif->sg_nents) {
                dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents,
                             hwif->sg_dma_direction);
                hwif->sg_nents = 0;
        }

        return 0;
}

static void auide_dma_start(ide_drive_t *drive)
{
}

static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr,
                            (2 * WAIT_CMD), NULL);
}

static int auide_dma_setup(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;

        if (!auide_build_dmatable(drive)) {
                ide_map_sg(drive, rq);
                return 1;
        }

        drive->waiting_for_dma = 1;
        return 0;
}
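/*
 * Decide whether DMA may be used for this drive.  The DBDMA engine is set
 * up lazily on the first call, using the white/black lists to pick burst
 * size and device width; blacklisted drives are forced back to PIO.
 */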
static int auide_dma_check(ide_drive_t *drive)
{
        u8 speed;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        if (dbdma_init_done == 0) {
                auide_hwif.white_list = ide_in_drive_list(drive->id,
                                                          dma_white_list);
                auide_hwif.black_list = ide_in_drive_list(drive->id,
                                                          dma_black_list);
                auide_hwif.drive = drive;
                auide_ddma_init(&auide_hwif);
                dbdma_init_done = 1;
        }
#endif

        /* Is the drive in our DMA black list? */
        if (auide_hwif.black_list) {
                drive->using_dma = 0;

                /* Borrowed the warning message from ide-dma.c */
                printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
                       drive->name, drive->id->model);
        } else
                drive->using_dma = 1;

        speed = ide_find_best_mode(drive, XFER_PIO | XFER_MWDMA);

        if (drive->autodma && (speed & XFER_MODE) != XFER_PIO)
                return HWIF(drive)->ide_dma_on(drive);

        return HWIF(drive)->ide_dma_off_quietly(drive);
}

static int auide_dma_test_irq(ide_drive_t *drive)
{
        if (drive->waiting_for_dma == 0)
                printk(KERN_WARNING "%s: ide_dma_test_irq \
                        called while not waiting\n", drive->name);

        /* If dbdma didn't execute the STOP command yet, the
         * active bit is still set
         */
        drive->waiting_for_dma++;
        if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
                printk(KERN_WARNING "%s: timeout waiting for ddma to \
                        complete\n", drive->name);
                return 1;
        }
        udelay(10);
        return 0;
}
static int auide_dma_host_on(ide_drive_t *drive)
{
        return 0;
}

static int auide_dma_on(ide_drive_t *drive)
{
        drive->using_dma = 1;

        return auide_dma_host_on(drive);
}

static int auide_dma_host_off(ide_drive_t *drive)
{
        return 0;
}

static int auide_dma_off_quietly(ide_drive_t *drive)
{
        drive->using_dma = 0;

        return auide_dma_host_off(drive);
}

static int auide_dma_lostirq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: IRQ lost\n", drive->name);

        return 0;
}

static void auide_ddma_tx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;

        ahwif->drive->waiting_for_dma = 0;
}

static void auide_ddma_rx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;

        ahwif->drive->waiting_for_dma = 0;
}
#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
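/* Fill in a dbdev_tab_t entry for the ATA target on the DBDMA controller. */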
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize,
                                 u32 devwidth, u32 flags)
{
        dev->dev_id          = dev_id;
        dev->dev_physaddr    = (u32)AU1XXX_ATA_PHYS_ADDR;
        dev->dev_intlevel    = 0;
        dev->dev_intpolarity = 0;
        dev->dev_tsize       = tsize;
        dev->dev_devwidth    = devwidth;
        dev->dev_flags       = flags;
}
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)

static int auide_dma_timeout(ide_drive_t *drive)
{
        /* printk("%s\n", __FUNCTION__); */

        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

        if (HWIF(drive)->ide_dma_test_irq(drive))
                return 0;

        return HWIF(drive)->ide_dma_end(drive);
}
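/*
 * Register the DBDMA source/sink devices, allocate the TX and RX channels,
 * their descriptor rings and a coherent PRD table, then start both channels.
 */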
static int auide_ddma_init(_auide_hwif *auide)
{
        dbdev_tab_t source_dev_tab, target_dev_tab;
        u32 dev_id, tsize, devwidth, flags;
        ide_hwif_t *hwif = auide->hwif;

        dev_id = AU1XXX_ATA_DDMA_REQ;

        if (auide->white_list || auide->black_list) {
                tsize = 8;
                devwidth = 32;
        } else {
                tsize = 1;
                devwidth = 16;

                printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",
                       auide_hwif.drive->id->model);
                printk(KERN_ERR "            please read 'Documentation/mips/AU1xxx_IDE.README'");
        }

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab,
                             dev_id,
                             tsize, devwidth, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab,
                             dev_id,
                             tsize, devwidth, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* We also need to add a target device for the DMA */
        auide_init_dbdma_dev(&target_dev_tab,
                             (u32)DSCR_CMD0_ALWAYS,
                             tsize, devwidth, DEV_FLAGS_ANYUSE);
        auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
                                                 auide->tx_dev_id,
                                                 auide_ddma_tx_callback,
                                                 (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 auide->target_dev_id,
                                                 auide_ddma_rx_callback,
                                                 (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        hwif->dmatable_cpu = dma_alloc_coherent(auide->dev,
                                                PRD_ENTRIES * PRD_BYTES, /* 1 Page */
                                                &hwif->dmatable_dma, GFP_KERNEL);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#else

static int auide_ddma_init(_auide_hwif *auide)
{
        dbdev_tab_t source_dev_tab;
        int flags;

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab,
                             (u32)DSCR_CMD0_ALWAYS,
                             8, 32, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab,
                             (u32)DSCR_CMD0_ALWAYS,
                             8, 32, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
                                                 auide->tx_dev_id,
                                                 NULL,
                                                 (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 DSCR_CMD0_ALWAYS,
                                                 NULL,
                                                 (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#endif
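/*
 * Map the task file registers and the Alternate Status register (offset 14)
 * into hw->io_ports, each shifted by AU1XXX_ATA_REG_OFFSET from regbase.
 */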
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
        int i;
        unsigned long *ata_regs = hw->io_ports;

        /* FIXME? */
        for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
                *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
        }

        /* set the Alternative Status register */
        *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
}
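/*
 * Platform bus probe: map the controller's memory resource, wire the
 * Au1xxx-specific PIO/DMA methods into the ide_hwif_t and register the
 * interface with the IDE core via probe_hwif_init().
 */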
static int au_ide_probe(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        _auide_hwif *ahwif = &auide_hwif;
        ide_hwif_t *hwif;
        struct resource *res;
        int ret = 0;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
        char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
        char *mode = "PIO+DDMA(offload)";
#endif

        memset(&auide_hwif, 0, sizeof(_auide_hwif));
        auide_hwif.dev = 0;

        ahwif->dev = dev;
        ahwif->irq = platform_get_irq(pdev, 0);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (res == NULL) {
                pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }
        if (ahwif->irq < 0) {
                pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }

        if (!request_mem_region(res->start, res->end - res->start, pdev->name)) {
                pr_debug("%s: request_mem_region failed\n", DRV_NAME);
                ret = -EBUSY;
                goto out;
        }

        ahwif->regbase = (u32)ioremap(res->start, res->end - res->start);
        if (ahwif->regbase == 0) {
                ret = -ENOMEM;
                goto out;
        }

        /* FIXME: This might possibly break PCMCIA IDE devices */
        hwif = &ide_hwifs[pdev->id];
        hw_regs_t *hw = &hwif->hw;
        hwif->irq = hw->irq = ahwif->irq;
        hwif->chipset = ide_au1xxx;

        auide_setup_ports(hw, ahwif);
        memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));

        hwif->ultra_mask = 0x0;         /* Disable Ultra DMA */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        hwif->mwdma_mask = 0x07;        /* Multimode-2 DMA */
        hwif->swdma_mask = 0x00;
#else
        hwif->mwdma_mask = 0x0;
        hwif->swdma_mask = 0x0;
#endif

        hwif->noprobe = 0;
        hwif->drives[0].unmask = 1;
        hwif->drives[1].unmask = 1;

        /* hold should be on in all cases */
        hwif->hold = 1;
        hwif->mmio = 1;

        /* If the user has selected DDMA assisted copies,
           then set up a few local I/O function entry points
         */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
        hwif->INSW = auide_insw;
        hwif->OUTSW = auide_outsw;
#endif

        hwif->tuneproc = &auide_tune_drive;
        hwif->speedproc = &auide_tune_chipset;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        hwif->ide_dma_off_quietly = &auide_dma_off_quietly;
        hwif->ide_dma_timeout = &auide_dma_timeout;

        hwif->ide_dma_check = &auide_dma_check;
        hwif->dma_exec_cmd = &auide_dma_exec_cmd;
        hwif->dma_start = &auide_dma_start;
        hwif->ide_dma_end = &auide_dma_end;
        hwif->dma_setup = &auide_dma_setup;
        hwif->ide_dma_test_irq = &auide_dma_test_irq;
        hwif->ide_dma_host_off = &auide_dma_host_off;
        hwif->ide_dma_host_on = &auide_dma_host_on;
        hwif->ide_dma_lostirq = &auide_dma_lostirq;
        hwif->ide_dma_on = &auide_dma_on;

        hwif->autodma = 1;
        hwif->drives[0].autodma = hwif->autodma;
        hwif->drives[1].autodma = hwif->autodma;
        hwif->atapi_dma = 1;
#else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
        hwif->autodma = 0;
        hwif->channel = 0;
        hwif->hold = 1;
        hwif->select_data = 0;          /* no chipset-specific code */
        hwif->config_data = 0;          /* no chipset-specific code */
        hwif->drives[0].autodma = 0;
        hwif->drives[0].autotune = 1;   /* 1=autotune, 2=noautotune, 0=default */
#endif
        hwif->drives[0].no_io_32bit = 1;

        auide_hwif.hwif = hwif;
        hwif->hwif_data = &auide_hwif;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
        auide_ddma_init(&auide_hwif);
        dbdma_init_done = 1;
#endif

        probe_hwif_init(hwif);

        dev_set_drvdata(dev, hwif);

        printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode);

out:
        return ret;
}
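/* Undo au_ide_probe(): unregister the interface and release its resources. */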
static int au_ide_remove(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;
        ide_hwif_t *hwif = dev_get_drvdata(dev);
        _auide_hwif *ahwif = &auide_hwif;

        ide_unregister(hwif - ide_hwifs);

        iounmap((void *)ahwif->regbase);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, res->end - res->start);

        return 0;
}
static struct device_driver au1200_ide_driver = {
        .name   = "au1200-ide",
        .bus    = &platform_bus_type,
        .probe  = au_ide_probe,
        .remove = au_ide_remove,
};

static int __init au_ide_init(void)
{
        return driver_register(&au1200_ide_driver);
}

static void __exit au_ide_exit(void)
{
        driver_unregister(&au1200_ide_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);