au1xxx-ide.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706
  1. /*
  2. * BRIEF MODULE DESCRIPTION
  3. * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
  4. *
  5. * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
  6. *
  7. * This program is free software; you can redistribute it and/or modify it under
  8. * the terms of the GNU General Public License as published by the Free Software
  9. * Foundation; either version 2 of the License, or (at your option) any later
  10. * version.
  11. *
  12. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
  13. * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  14. * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
  15. * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  16. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  17. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  18. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  19. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  20. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  21. * POSSIBILITY OF SUCH DAMAGE.
  22. *
  23. * You should have received a copy of the GNU General Public License along with
  24. * this program; if not, write to the Free Software Foundation, Inc.,
  25. * 675 Mass Ave, Cambridge, MA 02139, USA.
  26. *
  27. * Note: for more information, please refer "AMD Alchemy Au1200/Au1550 IDE
  28. * Interface and Linux Device Driver" Application Note.
  29. */
  30. #include <linux/types.h>
  31. #include <linux/module.h>
  32. #include <linux/kernel.h>
  33. #include <linux/delay.h>
  34. #include <linux/platform_device.h>
  35. #include <linux/init.h>
  36. #include <linux/ide.h>
  37. #include <linux/scatterlist.h>
  38. #include <asm/mach-au1x00/au1xxx.h>
  39. #include <asm/mach-au1x00/au1xxx_dbdma.h>
  40. #include <asm/mach-au1x00/au1xxx_ide.h>
  41. #define DRV_NAME "au1200-ide"
  42. #define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"
  43. /* enable the burstmode in the dbdma */
  44. #define IDE_AU1XXX_BURSTMODE 1
/* There is a single on-chip IDE interface; all driver state lives here. */
static _auide_hwif auide_hwif;
/* Non-zero once the DbDMA channels/rings have been set up (auide_ddma_init). */
static int dbdma_init_done;

static int auide_ddma_init(_auide_hwif *auide);
  48. #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
  49. void auide_insw(unsigned long port, void *addr, u32 count)
  50. {
  51. _auide_hwif *ahwif = &auide_hwif;
  52. chan_tab_t *ctp;
  53. au1x_ddma_desc_t *dp;
  54. if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,
  55. DDMA_FLAGS_NOIE)) {
  56. printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
  57. return;
  58. }
  59. ctp = *((chan_tab_t **)ahwif->rx_chan);
  60. dp = ctp->cur_ptr;
  61. while (dp->dscr_cmd0 & DSCR_CMD0_V)
  62. ;
  63. ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
  64. }
  65. void auide_outsw(unsigned long port, void *addr, u32 count)
  66. {
  67. _auide_hwif *ahwif = &auide_hwif;
  68. chan_tab_t *ctp;
  69. au1x_ddma_desc_t *dp;
  70. if(!put_source_flags(ahwif->tx_chan, (void*)addr,
  71. count << 1, DDMA_FLAGS_NOIE)) {
  72. printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
  73. return;
  74. }
  75. ctp = *((chan_tab_t **)ahwif->tx_chan);
  76. dp = ctp->cur_ptr;
  77. while (dp->dscr_cmd0 & DSCR_CMD0_V)
  78. ;
  79. ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
  80. }
  81. #endif
  82. static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
  83. {
  84. int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
  85. /* set pio mode! */
  86. switch(pio) {
  87. case 0:
  88. mem_sttime = SBC_IDE_TIMING(PIO0);
  89. /* set configuration for RCS2# */
  90. mem_stcfg |= TS_MASK;
  91. mem_stcfg &= ~TCSOE_MASK;
  92. mem_stcfg &= ~TOECS_MASK;
  93. mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
  94. break;
  95. case 1:
  96. mem_sttime = SBC_IDE_TIMING(PIO1);
  97. /* set configuration for RCS2# */
  98. mem_stcfg |= TS_MASK;
  99. mem_stcfg &= ~TCSOE_MASK;
  100. mem_stcfg &= ~TOECS_MASK;
  101. mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
  102. break;
  103. case 2:
  104. mem_sttime = SBC_IDE_TIMING(PIO2);
  105. /* set configuration for RCS2# */
  106. mem_stcfg &= ~TS_MASK;
  107. mem_stcfg &= ~TCSOE_MASK;
  108. mem_stcfg &= ~TOECS_MASK;
  109. mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
  110. break;
  111. case 3:
  112. mem_sttime = SBC_IDE_TIMING(PIO3);
  113. /* set configuration for RCS2# */
  114. mem_stcfg &= ~TS_MASK;
  115. mem_stcfg &= ~TCSOE_MASK;
  116. mem_stcfg &= ~TOECS_MASK;
  117. mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
  118. break;
  119. case 4:
  120. mem_sttime = SBC_IDE_TIMING(PIO4);
  121. /* set configuration for RCS2# */
  122. mem_stcfg &= ~TS_MASK;
  123. mem_stcfg &= ~TCSOE_MASK;
  124. mem_stcfg &= ~TOECS_MASK;
  125. mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
  126. break;
  127. }
  128. au_writel(mem_sttime,MEM_STTIME2);
  129. au_writel(mem_stcfg,MEM_STCFG2);
  130. }
  131. static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
  132. {
  133. int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
  134. switch(speed) {
  135. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
  136. case XFER_MW_DMA_2:
  137. mem_sttime = SBC_IDE_TIMING(MDMA2);
  138. /* set configuration for RCS2# */
  139. mem_stcfg &= ~TS_MASK;
  140. mem_stcfg &= ~TCSOE_MASK;
  141. mem_stcfg &= ~TOECS_MASK;
  142. mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
  143. break;
  144. case XFER_MW_DMA_1:
  145. mem_sttime = SBC_IDE_TIMING(MDMA1);
  146. /* set configuration for RCS2# */
  147. mem_stcfg &= ~TS_MASK;
  148. mem_stcfg &= ~TCSOE_MASK;
  149. mem_stcfg &= ~TOECS_MASK;
  150. mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
  151. break;
  152. case XFER_MW_DMA_0:
  153. mem_sttime = SBC_IDE_TIMING(MDMA0);
  154. /* set configuration for RCS2# */
  155. mem_stcfg |= TS_MASK;
  156. mem_stcfg &= ~TCSOE_MASK;
  157. mem_stcfg &= ~TOECS_MASK;
  158. mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
  159. break;
  160. #endif
  161. }
  162. au_writel(mem_sttime,MEM_STTIME2);
  163. au_writel(mem_stcfg,MEM_STCFG2);
  164. }
  165. /*
  166. * Multi-Word DMA + DbDMA functions
  167. */
  168. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
  169. static int auide_build_dmatable(ide_drive_t *drive)
  170. {
  171. int i, iswrite, count = 0;
  172. ide_hwif_t *hwif = HWIF(drive);
  173. struct request *rq = HWGROUP(drive)->rq;
  174. _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
  175. struct scatterlist *sg;
  176. iswrite = (rq_data_dir(rq) == WRITE);
  177. /* Save for interrupt context */
  178. ahwif->drive = drive;
  179. hwif->sg_nents = i = ide_build_sglist(drive, rq);
  180. if (!i)
  181. return 0;
  182. /* fill the descriptors */
  183. sg = hwif->sg_table;
  184. while (i && sg_dma_len(sg)) {
  185. u32 cur_addr;
  186. u32 cur_len;
  187. cur_addr = sg_dma_address(sg);
  188. cur_len = sg_dma_len(sg);
  189. while (cur_len) {
  190. u32 flags = DDMA_FLAGS_NOIE;
  191. unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
  192. if (++count >= PRD_ENTRIES) {
  193. printk(KERN_WARNING "%s: DMA table too small\n",
  194. drive->name);
  195. goto use_pio_instead;
  196. }
  197. /* Lets enable intr for the last descriptor only */
  198. if (1==i)
  199. flags = DDMA_FLAGS_IE;
  200. else
  201. flags = DDMA_FLAGS_NOIE;
  202. if (iswrite) {
  203. if(!put_source_flags(ahwif->tx_chan,
  204. (void*) sg_virt(sg),
  205. tc, flags)) {
  206. printk(KERN_ERR "%s failed %d\n",
  207. __FUNCTION__, __LINE__);
  208. }
  209. } else
  210. {
  211. if(!put_dest_flags(ahwif->rx_chan,
  212. (void*) sg_virt(sg),
  213. tc, flags)) {
  214. printk(KERN_ERR "%s failed %d\n",
  215. __FUNCTION__, __LINE__);
  216. }
  217. }
  218. cur_addr += tc;
  219. cur_len -= tc;
  220. }
  221. sg = sg_next(sg);
  222. i--;
  223. }
  224. if (count)
  225. return 1;
  226. use_pio_instead:
  227. ide_destroy_dmatable(drive);
  228. return 0; /* revert to PIO for this request */
  229. }
  230. static int auide_dma_end(ide_drive_t *drive)
  231. {
  232. ide_hwif_t *hwif = HWIF(drive);
  233. if (hwif->sg_nents) {
  234. ide_destroy_dmatable(drive);
  235. hwif->sg_nents = 0;
  236. }
  237. return 0;
  238. }
/* ->dma_start hook.  Descriptors are already pushed to the DbDMA engine by
 * auide_dma_setup()/auide_build_dmatable(), so there is nothing left to
 * kick off here; the hook is intentionally empty. */
static void auide_dma_start(ide_drive_t *drive )
{
}
/* ->dma_exec_cmd hook: issue the ATA command and arm the generic DMA
 * completion handler.  The timeout is 2*WAIT_CMD — presumably to give the
 * DbDMA path extra headroom; TODO confirm against the app note. */
static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr,
			    (2*WAIT_CMD), NULL);
}
  248. static int auide_dma_setup(ide_drive_t *drive)
  249. {
  250. struct request *rq = HWGROUP(drive)->rq;
  251. if (!auide_build_dmatable(drive)) {
  252. ide_map_sg(drive, rq);
  253. return 1;
  254. }
  255. drive->waiting_for_dma = 1;
  256. return 0;
  257. }
/* ->mdma_filter hook.  Doubles as the lazy one-time DbDMA bring-up point:
 * the channel geometry depends on whether the drive is white/blacklisted,
 * which is only known once a drive's ID page is available, so
 * auide_ddma_init() is deferred until the first filter call. */
static u8 auide_mdma_filter(ide_drive_t *drive)
{
	/*
	 * FIXME: ->white_list and ->black_list are based on completely bogus
	 * ->ide_dma_check implementation which didn't set neither the host
	 * controller timings nor the device for the desired transfer mode.
	 *
	 * They should be either removed or 0x00 MWDMA mask should be
	 * returned for devices on the ->black_list.
	 */

	if (dbdma_init_done == 0) {
		/* classify the drive, then size the DbDMA setup accordingly */
		auide_hwif.white_list = ide_in_drive_list(drive->id,
							  dma_white_list);
		auide_hwif.black_list = ide_in_drive_list(drive->id,
							  dma_black_list);
		auide_hwif.drive = drive;
		auide_ddma_init(&auide_hwif);
		dbdma_init_done = 1;
	}

	/* Is the drive in our DMA black list? */
	if (auide_hwif.black_list)
		printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
		       drive->name, drive->id->model);

	/* note: despite the warning above, the full mask is still returned */
	return drive->hwif->mwdma_mask;
}
/* ->ide_dma_test_irq hook.  The DbDMA completion callbacks clear
 * waiting_for_dma; until then this poller increments it as a crude
 * retry counter and reports completion (1) only after DMA_WAIT_TIMEOUT
 * polls, forcing the core to give up on a stuck transfer. */
static int auide_dma_test_irq(ide_drive_t *drive)
{
	if (drive->waiting_for_dma == 0)
		printk(KERN_WARNING "%s: ide_dma_test_irq \
				called while not waiting\n", drive->name);

	/* If dbdma didn't execute the STOP command yet, the
	 * active bit is still set
	 */
	drive->waiting_for_dma++;
	if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
		printk(KERN_WARNING "%s: timeout waiting for ddma to \
				complete\n", drive->name);
		return 1;
	}
	udelay(10);
	return 0;
}
/* ->dma_host_set hook: intentionally empty — this driver exposes no
 * controller-side per-drive DMA enable to toggle. */
static void auide_dma_host_set(ide_drive_t *drive, int on)
{
}
/* ->dma_lost_irq hook: log only; no recovery is attempted here. */
static void auide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}
/* DbDMA TX-channel completion callback: clear waiting_for_dma so
 * auide_dma_test_irq() sees the transfer as finished. */
static void auide_ddma_tx_callback(int irq, void *param)
{
	_auide_hwif *ahwif = (_auide_hwif*)param;
	ahwif->drive->waiting_for_dma = 0;
}
/* DbDMA RX-channel completion callback: same contract as the TX one. */
static void auide_ddma_rx_callback(int irq, void *param)
{
	_auide_hwif *ahwif = (_auide_hwif*)param;
	ahwif->drive->waiting_for_dma = 0;
}
  317. #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
/* Fill in a DbDMA device-table entry.  Every entry points at the ATA
 * controller's physical address with no interrupt level/polarity of its
 * own; only the device id, transfer size, bus width and flags vary. */
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags)
{
	dev->dev_id = dev_id;
	dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
	dev->dev_intlevel = 0;
	dev->dev_intpolarity = 0;
	dev->dev_tsize = tsize;
	dev->dev_devwidth = devwidth;
	dev->dev_flags = flags;
}
  328. #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
/* ->dma_timeout hook: if the poll handler says the transfer completed
 * after all, just return; otherwise tear the DMA transfer down. */
static void auide_dma_timeout(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

	if (hwif->ide_dma_test_irq(drive))
		return;

	hwif->ide_dma_end(drive);
}
  337. static int auide_ddma_init(_auide_hwif *auide) {
  338. dbdev_tab_t source_dev_tab, target_dev_tab;
  339. u32 dev_id, tsize, devwidth, flags;
  340. ide_hwif_t *hwif = auide->hwif;
  341. dev_id = AU1XXX_ATA_DDMA_REQ;
  342. if (auide->white_list || auide->black_list) {
  343. tsize = 8;
  344. devwidth = 32;
  345. }
  346. else {
  347. tsize = 1;
  348. devwidth = 16;
  349. printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model);
  350. printk(KERN_ERR " please read 'Documentation/mips/AU1xxx_IDE.README'");
  351. }
  352. #ifdef IDE_AU1XXX_BURSTMODE
  353. flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
  354. #else
  355. flags = DEV_FLAGS_SYNC;
  356. #endif
  357. /* setup dev_tab for tx channel */
  358. auide_init_dbdma_dev( &source_dev_tab,
  359. dev_id,
  360. tsize, devwidth, DEV_FLAGS_OUT | flags);
  361. auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
  362. auide_init_dbdma_dev( &source_dev_tab,
  363. dev_id,
  364. tsize, devwidth, DEV_FLAGS_IN | flags);
  365. auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
  366. /* We also need to add a target device for the DMA */
  367. auide_init_dbdma_dev( &target_dev_tab,
  368. (u32)DSCR_CMD0_ALWAYS,
  369. tsize, devwidth, DEV_FLAGS_ANYUSE);
  370. auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);
  371. /* Get a channel for TX */
  372. auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
  373. auide->tx_dev_id,
  374. auide_ddma_tx_callback,
  375. (void*)auide);
  376. /* Get a channel for RX */
  377. auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
  378. auide->target_dev_id,
  379. auide_ddma_rx_callback,
  380. (void*)auide);
  381. auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
  382. NUM_DESCRIPTORS);
  383. auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
  384. NUM_DESCRIPTORS);
  385. hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
  386. PRD_ENTRIES * PRD_BYTES, /* 1 Page */
  387. &hwif->dmatable_dma, GFP_KERNEL);
  388. au1xxx_dbdma_start( auide->tx_chan );
  389. au1xxx_dbdma_start( auide->rx_chan );
  390. return 0;
  391. }
  392. #else
/*
 * PIO+DbDMA variant of the bring-up: both ends of each channel use
 * DSCR_CMD0_ALWAYS (no ATA DMA request line), and no completion callbacks
 * are registered — auide_insw()/auide_outsw() poll the descriptors
 * themselves.  Always returns 0.
 */
static int auide_ddma_init( _auide_hwif *auide )
{
	dbdev_tab_t source_dev_tab;
	int flags;

#ifdef IDE_AU1XXX_BURSTMODE
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

	/* setup dev_tab for tx channel */
	auide_init_dbdma_dev( &source_dev_tab,
			      (u32)DSCR_CMD0_ALWAYS,
			      8, 32, DEV_FLAGS_OUT | flags);
	auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

	auide_init_dbdma_dev( &source_dev_tab,
			      (u32)DSCR_CMD0_ALWAYS,
			      8, 32, DEV_FLAGS_IN | flags);
	auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
						 auide->tx_dev_id,
						 NULL,
						 (void*)auide);

	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 DSCR_CMD0_ALWAYS,
						 NULL,
						 (void*)auide);

	auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							     NUM_DESCRIPTORS);
	auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							     NUM_DESCRIPTORS);

	au1xxx_dbdma_start( auide->tx_chan );
	au1xxx_dbdma_start( auide->rx_chan );

	return 0;
}
  429. #endif
/* Populate hw->io_ports with the memory-mapped ATA task-file register
 * addresses.  Registers are spaced (1 << AU1XXX_ATA_REG_OFFSET) bytes
 * apart on the Static Bus; the Alternate Status register lives at
 * index 14 within the same window. */
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
	int i;
	unsigned long *ata_regs = hw->io_ports;

	/* FIXME? */
	for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
		*ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
	}

	/* set the Alternative Status register */
	*ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
}
/* Port capabilities advertised to the IDE core: PIO 0-4 always, plus
 * MWDMA 0-2 when the DbDMA-backed MDMA path is compiled in.  SFF-style
 * (BMDMA) DMA is explicitly disabled — transfers go through DbDMA. */
static const struct ide_port_info au1xxx_port_info = {
	.host_flags = IDE_HFLAG_POST_SET_MODE |
		      IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
		      IDE_HFLAG_NO_IO_32BIT |
		      IDE_HFLAG_UNMASK_IRQS,
	.pio_mask = ATA_PIO4,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	.mwdma_mask = ATA_MWDMA2,
#endif
};
  451. static int au_ide_probe(struct device *dev)
  452. {
  453. struct platform_device *pdev = to_platform_device(dev);
  454. _auide_hwif *ahwif = &auide_hwif;
  455. ide_hwif_t *hwif;
  456. struct resource *res;
  457. int ret = 0;
  458. u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
  459. hw_regs_t hw;
  460. #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
  461. char *mode = "MWDMA2";
  462. #elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
  463. char *mode = "PIO+DDMA(offload)";
  464. #endif
  465. memset(&auide_hwif, 0, sizeof(_auide_hwif));
  466. ahwif->irq = platform_get_irq(pdev, 0);
  467. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  468. if (res == NULL) {
  469. pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
  470. ret = -ENODEV;
  471. goto out;
  472. }
  473. if (ahwif->irq < 0) {
  474. pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
  475. ret = -ENODEV;
  476. goto out;
  477. }
  478. if (!request_mem_region(res->start, res->end - res->start + 1,
  479. pdev->name)) {
  480. pr_debug("%s: request_mem_region failed\n", DRV_NAME);
  481. ret = -EBUSY;
  482. goto out;
  483. }
  484. ahwif->regbase = (u32)ioremap(res->start, res->end - res->start + 1);
  485. if (ahwif->regbase == 0) {
  486. ret = -ENOMEM;
  487. goto out;
  488. }
  489. hwif = ide_find_port();
  490. if (hwif == NULL) {
  491. ret = -ENOENT;
  492. goto out;
  493. }
  494. memset(&hw, 0, sizeof(hw));
  495. auide_setup_ports(&hw, ahwif);
  496. hw.irq = ahwif->irq;
  497. hw.dev = dev;
  498. hw.chipset = ide_au1xxx;
  499. ide_init_port_hw(hwif, &hw);
  500. hwif->dev = dev;
  501. hwif->mmio = 1;
  502. /* If the user has selected DDMA assisted copies,
  503. then set up a few local I/O function entry points
  504. */
  505. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
  506. hwif->INSW = auide_insw;
  507. hwif->OUTSW = auide_outsw;
  508. #endif
  509. hwif->set_pio_mode = &au1xxx_set_pio_mode;
  510. hwif->set_dma_mode = &auide_set_dma_mode;
  511. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
  512. hwif->dma_timeout = &auide_dma_timeout;
  513. hwif->mdma_filter = &auide_mdma_filter;
  514. hwif->dma_host_set = &auide_dma_host_set;
  515. hwif->dma_exec_cmd = &auide_dma_exec_cmd;
  516. hwif->dma_start = &auide_dma_start;
  517. hwif->ide_dma_end = &auide_dma_end;
  518. hwif->dma_setup = &auide_dma_setup;
  519. hwif->ide_dma_test_irq = &auide_dma_test_irq;
  520. hwif->dma_lost_irq = &auide_dma_lost_irq;
  521. #endif
  522. hwif->select_data = 0; /* no chipset-specific code */
  523. hwif->config_data = 0; /* no chipset-specific code */
  524. auide_hwif.hwif = hwif;
  525. hwif->hwif_data = &auide_hwif;
  526. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
  527. auide_ddma_init(&auide_hwif);
  528. dbdma_init_done = 1;
  529. #endif
  530. idx[0] = hwif->index;
  531. ide_device_add(idx, &au1xxx_port_info);
  532. dev_set_drvdata(dev, hwif);
  533. printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
  534. out:
  535. return ret;
  536. }
/* Undo au_ide_probe(): detach the port from the IDE core, then release
 * the register mapping and the memory region claimed at probe time. */
static int au_ide_remove(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	ide_hwif_t *hwif = dev_get_drvdata(dev);
	_auide_hwif *ahwif = &auide_hwif;

	ide_unregister(hwif->index);

	iounmap((void *)ahwif->regbase);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	return 0;
}
/* Driver glue bound to the "au1200-ide" platform device. */
static struct device_driver au1200_ide_driver = {
	.name = "au1200-ide",
	.bus = &platform_bus_type,
	.probe = au_ide_probe,
	.remove = au_ide_remove,
};
/* Module entry point: register the driver with the platform bus. */
static int __init au_ide_init(void)
{
	return driver_register(&au1200_ide_driver);
}
/* Module exit point: unregister the driver again. */
static void __exit au_ide_exit(void)
{
	driver_unregister(&au1200_ide_driver);
}
/* Module metadata and init/exit hookup. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);