/*
 * BRIEF MODULE DESCRIPTION
 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
 *
 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
 * IDE Interface and Linux Device Driver" Application Note.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>

#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_ide.h>

#define DRV_NAME        "au1200-ide"
#define DRV_AUTHOR      "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"

/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE    1

static _auide_hwif auide_hwif;

static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d);
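
/*
 * When CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA is selected, PIO-mode string
 * transfers are offloaded to the DbDMA engine: each call below queues a
 * single descriptor for the data buffer (count << 1 turns the 16-bit word
 * count into a byte length) and then busy-waits until the controller
 * clears the descriptor valid bit (DSCR_CMD0_V) before advancing the
 * channel's current-descriptor pointer.
 */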
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)

void auide_insw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_dest_flags(ahwif->rx_chan, (void *)addr, count << 1,
                            DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->rx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

void auide_outsw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_source_flags(ahwif->tx_chan, (void *)addr,
                              count << 1, DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->tx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

#endif
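
/*
 * PIO and MWDMA timings are programmed into the Au1xxx static bus
 * controller: MEM_STTIME2 takes the cycle timing for the IDE chip select
 * (RCS2#) and MEM_STCFG2 takes the TS/TCSOE/TOECS configuration bits,
 * using the SBC_IDE_* values (presumably provided by
 * <asm/mach-au1x00/au1xxx_ide.h>).
 */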
static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        /* set pio mode! */
        switch (pio) {
        case 0:
                mem_sttime = SBC_IDE_TIMING(PIO0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
                break;
        case 1:
                mem_sttime = SBC_IDE_TIMING(PIO1);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
                break;
        case 2:
                mem_sttime = SBC_IDE_TIMING(PIO2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
                break;
        case 3:
                mem_sttime = SBC_IDE_TIMING(PIO3);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
                break;
        case 4:
                mem_sttime = SBC_IDE_TIMING(PIO4);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
                break;
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);
}
static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        switch (speed) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        case XFER_MW_DMA_2:
                mem_sttime = SBC_IDE_TIMING(MDMA2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
                break;
        case XFER_MW_DMA_1:
                mem_sttime = SBC_IDE_TIMING(MDMA1);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
                break;
        case XFER_MW_DMA_0:
                mem_sttime = SBC_IDE_TIMING(MDMA0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
                break;
#endif
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);
}

/*
 * Multi-Word DMA + DbDMA functions
 */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
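
/*
 * Walk the request's scatterlist and queue one DbDMA descriptor per chunk,
 * capping each descriptor at 0xfe00 bytes and limiting the total to
 * PRD_ENTRIES descriptors.  Descriptors belonging to the last scatterlist
 * segment are queued with DDMA_FLAGS_IE so a completion interrupt fires at
 * the end of the transfer.  Returns 1 on success, 0 to make the caller
 * fall back to PIO for this request.
 */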
static int auide_build_dmatable(ide_drive_t *drive)
{
        int i, iswrite, count = 0;
        ide_hwif_t *hwif = HWIF(drive);
        struct request *rq = HWGROUP(drive)->rq;
        _auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
        struct scatterlist *sg;

        iswrite = (rq_data_dir(rq) == WRITE);
        /* Save for interrupt context */
        ahwif->drive = drive;

        hwif->sg_nents = i = ide_build_sglist(drive, rq);
        if (!i)
                return 0;

        /* fill the descriptors */
        sg = hwif->sg_table;
        while (i && sg_dma_len(sg)) {
                u32 cur_addr;
                u32 cur_len;

                cur_addr = sg_dma_address(sg);
                cur_len = sg_dma_len(sg);

                while (cur_len) {
                        u32 flags = DDMA_FLAGS_NOIE;
                        unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

                        if (++count >= PRD_ENTRIES) {
                                printk(KERN_WARNING "%s: DMA table too small\n",
                                       drive->name);
                                goto use_pio_instead;
                        }

                        /* Let's enable intr for the last descriptor only */
                        if (i == 1)
                                flags = DDMA_FLAGS_IE;
                        else
                                flags = DDMA_FLAGS_NOIE;

                        if (iswrite) {
                                if (!put_source_flags(ahwif->tx_chan,
                                                      (void *)sg_virt(sg),
                                                      tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __func__, __LINE__);
                                }
                        } else {
                                if (!put_dest_flags(ahwif->rx_chan,
                                                    (void *)sg_virt(sg),
                                                    tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __func__, __LINE__);
                                }
                        }

                        cur_addr += tc;
                        cur_len -= tc;
                }
                sg = sg_next(sg);
                i--;
        }

        if (count)
                return 1;

use_pio_instead:
        ide_destroy_dmatable(drive);

        return 0; /* revert to PIO for this request */
}
static int auide_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        if (hwif->sg_nents) {
                ide_destroy_dmatable(drive);
                hwif->sg_nents = 0;
        }

        return 0;
}

static void auide_dma_start(ide_drive_t *drive)
{
}

static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr,
                            (2 * WAIT_CMD), NULL);
}

static int auide_dma_setup(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;

        if (!auide_build_dmatable(drive)) {
                ide_map_sg(drive, rq);
                return 1;
        }

        drive->waiting_for_dma = 1;
        return 0;
}

static int auide_dma_test_irq(ide_drive_t *drive)
{
        if (drive->waiting_for_dma == 0)
                printk(KERN_WARNING "%s: ide_dma_test_irq called while not waiting\n",
                       drive->name);

        /* If dbdma didn't execute the STOP command yet, the
         * active bit is still set
         */
        drive->waiting_for_dma++;
        if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
                printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
                       drive->name);
                return 1;
        }
        udelay(10);
        return 0;
}

static void auide_dma_host_set(ide_drive_t *drive, int on)
{
}

static void auide_dma_lost_irq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}

static void auide_ddma_tx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;

        ahwif->drive->waiting_for_dma = 0;
}

static void auide_ddma_rx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;

        ahwif->drive->waiting_for_dma = 0;
}

#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
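
/*
 * Helper that fills in a dbdev_tab_t describing the ATA interface to the
 * DbDMA controller (device id, the fixed physical address of the ATA
 * register block, transfer size, device width and flags) before it is
 * registered with au1xxx_ddma_add_device().
 */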
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize,
                                 u32 devwidth, u32 flags)
{
        dev->dev_id          = dev_id;
        dev->dev_physaddr    = (u32)AU1XXX_ATA_PHYS_ADDR;
        dev->dev_intlevel    = 0;
        dev->dev_intpolarity = 0;
        dev->dev_tsize       = tsize;
        dev->dev_devwidth    = devwidth;
        dev->dev_flags       = flags;
}
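
/*
 * Two variants of the DbDMA setup follow.  With MDMA2 support enabled,
 * real TX/RX request devices (AU1XXX_ATA_DDMA_REQ) are registered together
 * with an "always" target device, completion callbacks are installed, and
 * a coherent PRD table is allocated for the IDE core.  In the PIO-offload
 * build, both channels are tied to DSCR_CMD0_ALWAYS and no callbacks or
 * PRD table are needed, since transfers are driven synchronously from
 * auide_insw()/auide_outsw().
 */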
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA

static void auide_dma_timeout(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

        if (auide_dma_test_irq(drive))
                return;

        auide_dma_end(drive);
}

static const struct ide_dma_ops au1xxx_dma_ops = {
        .dma_host_set           = auide_dma_host_set,
        .dma_setup              = auide_dma_setup,
        .dma_exec_cmd           = auide_dma_exec_cmd,
        .dma_start              = auide_dma_start,
        .dma_end                = auide_dma_end,
        .dma_test_irq           = auide_dma_test_irq,
        .dma_lost_irq           = auide_dma_lost_irq,
        .dma_timeout            = auide_dma_timeout,
};

static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
        dbdev_tab_t source_dev_tab, target_dev_tab;
        u32 dev_id, tsize, devwidth, flags;

        dev_id   = AU1XXX_ATA_DDMA_REQ;
        tsize    = 8;  /* 1 */
        devwidth = 32; /* 16 */

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth,
                             DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth,
                             DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* We also need to add a target device for the DMA */
        auide_init_dbdma_dev(&target_dev_tab, (u32)DSCR_CMD0_ALWAYS, tsize,
                             devwidth, DEV_FLAGS_ANYUSE);
        auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
                                                 auide->tx_dev_id,
                                                 auide_ddma_tx_callback,
                                                 (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 auide->target_dev_id,
                                                 auide_ddma_rx_callback,
                                                 (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
                                                PRD_ENTRIES * PRD_BYTES, /* 1 Page */
                                                &hwif->dmatable_dma, GFP_KERNEL);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#else
static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
        dbdev_tab_t source_dev_tab;
        int flags;

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS, 8, 32,
                             DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS, 8, 32,
                             DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
                                                 auide->tx_dev_id,
                                                 NULL, (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 DSCR_CMD0_ALWAYS,
                                                 NULL, (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#endif
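
/*
 * Map the task-file registers into hw->io_ports: successive registers sit
 * AU1XXX_ATA_REG_OFFSET apart in the memory-mapped window starting at
 * ahwif->regbase, with the Alternate Status/Device Control register at
 * offset 14 << AU1XXX_ATA_REG_OFFSET.
 */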
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
        int i;
        unsigned long *ata_regs = hw->io_ports;

        /* FIXME? */
        for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
                *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
        }

        /* set the Alternative Status register */
        *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
}

static const struct ide_port_ops au1xxx_port_ops = {
        .set_pio_mode           = au1xxx_set_pio_mode,
        .set_dma_mode           = auide_set_dma_mode,
};

static const struct ide_port_info au1xxx_port_info = {
        .init_dma               = auide_ddma_init,
        .port_ops               = &au1xxx_port_ops,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        .dma_ops                = &au1xxx_dma_ops,
#endif
        .host_flags             = IDE_HFLAG_POST_SET_MODE |
                                  IDE_HFLAG_NO_IO_32BIT |
                                  IDE_HFLAG_UNMASK_IRQS,
        .pio_mask               = ATA_PIO4,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        .mwdma_mask             = ATA_MWDMA2,
#endif
};
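
/*
 * Platform bus probe: pick up the MEM resource and IRQ from the platform
 * device, ioremap the register window, fill a hw_regs_t via
 * auide_setup_ports() and register the port with the IDE core through
 * ide_device_add().  Note that the later error paths simply jump to "out"
 * without releasing the memory region or unmapping the registers.
 */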
static int au_ide_probe(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        _auide_hwif *ahwif = &auide_hwif;
        ide_hwif_t *hwif;
        struct resource *res;
        int ret = 0;
        u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
        hw_regs_t hw;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
        char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
        char *mode = "PIO+DDMA(offload)";
#endif

        memset(&auide_hwif, 0, sizeof(_auide_hwif));
        ahwif->irq = platform_get_irq(pdev, 0);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }

        if (ahwif->irq < 0) {
                pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }

        if (!request_mem_region(res->start, res->end - res->start + 1,
                                pdev->name)) {
                pr_debug("%s: request_mem_region failed\n", DRV_NAME);
                ret = -EBUSY;
                goto out;
        }

        ahwif->regbase = (u32)ioremap(res->start, res->end - res->start + 1);
        if (ahwif->regbase == 0) {
                ret = -ENOMEM;
                goto out;
        }

        hwif = ide_find_port();
        if (hwif == NULL) {
                ret = -ENOENT;
                goto out;
        }

        memset(&hw, 0, sizeof(hw));
        auide_setup_ports(&hw, ahwif);
        hw.irq = ahwif->irq;
        hw.dev = dev;
        hw.chipset = ide_au1xxx;

        ide_init_port_hw(hwif, &hw);

        hwif->dev = dev;

        /* If the user has selected DDMA assisted copies,
           then set up a few local I/O function entry points
         */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
        hwif->INSW  = auide_insw;
        hwif->OUTSW = auide_outsw;
#endif
        hwif->select_data = 0;  /* no chipset-specific code */
        hwif->config_data = 0;  /* no chipset-specific code */

        auide_hwif.hwif  = hwif;
        hwif->hwif_data  = &auide_hwif;

        idx[0] = hwif->index;

        ide_device_add(idx, &au1xxx_port_info);

        dev_set_drvdata(dev, hwif);

        printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode);

out:
        return ret;
}
static int au_ide_remove(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;
        ide_hwif_t *hwif = dev_get_drvdata(dev);
        _auide_hwif *ahwif = &auide_hwif;

        ide_unregister(hwif->index);

        iounmap((void *)ahwif->regbase);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, res->end - res->start + 1);

        return 0;
}

static struct device_driver au1200_ide_driver = {
        .name           = "au1200-ide",
        .bus            = &platform_bus_type,
        .probe          = au_ide_probe,
        .remove         = au_ide_remove,
};

static int __init au_ide_init(void)
{
        return driver_register(&au1200_ide_driver);
}

static void __exit au_ide_exit(void)
{
        driver_unregister(&au1200_ide_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);