/*
 * BRIEF MODULE DESCRIPTION
 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
 *
 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
 * IDE Interface and Linux Device Driver" Application Note.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>

#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_ide.h>

#define DRV_NAME	"au1200-ide"
#define DRV_AUTHOR	"Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"
/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE	1
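
/*
 * A single driver-private state block (auide_hwif) is shared by all the
 * routines below.
 *
 * When CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA is set, PIO data transfers are
 * offloaded to the descriptor-based DMA controller (DbDMA): auide_insw() and
 * auide_outsw() queue one descriptor covering the whole transfer ("count"
 * 16-bit words, hence count << 1 bytes) and then busy-wait on the
 * descriptor's valid bit (DSCR_CMD0_V) until the DbDMA engine has consumed
 * it before advancing the channel's current-descriptor pointer.
 */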
static _auide_hwif auide_hwif;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)

void auide_insw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_dest_flags(ahwif->rx_chan, (void *)addr, count << 1,
                            DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }

        ctp = *((chan_tab_t **)ahwif->rx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

void auide_outsw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_source_flags(ahwif->tx_chan, (void *)addr,
                              count << 1, DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }

        ctp = *((chan_tab_t **)ahwif->tx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

static void au1xxx_input_data(ide_drive_t *drive, struct request *rq,
                              void *buf, unsigned int len)
{
        auide_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
}

static void au1xxx_output_data(ide_drive_t *drive, struct request *rq,
                               void *buf, unsigned int len)
{
        auide_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
}

#endif
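
/*
 * IDE timing is controlled through the Au1xxx Static Bus Controller: each
 * PIO or Multi-Word DMA mode below selects a precomputed SBC_IDE_TIMING()
 * value for MEM_STTIME2 and adjusts the TS/TCSOE/TOECS fields of MEM_STCFG2,
 * the configuration register for the RCS2# chip select behind which the IDE
 * interface sits.
 */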
static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        /* set pio mode! */
        switch (pio) {
        case 0:
                mem_sttime = SBC_IDE_TIMING(PIO0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
                break;
        case 1:
                mem_sttime = SBC_IDE_TIMING(PIO1);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
                break;
        case 2:
                mem_sttime = SBC_IDE_TIMING(PIO2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
                break;
        case 3:
                mem_sttime = SBC_IDE_TIMING(PIO3);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
                break;
        case 4:
                mem_sttime = SBC_IDE_TIMING(PIO4);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
                break;
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);
}
static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        switch (speed) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        case XFER_MW_DMA_2:
                mem_sttime = SBC_IDE_TIMING(MDMA2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
                break;
        case XFER_MW_DMA_1:
                mem_sttime = SBC_IDE_TIMING(MDMA1);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
                break;
        case XFER_MW_DMA_0:
                mem_sttime = SBC_IDE_TIMING(MDMA0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
                break;
#endif
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);
}
/*
 * Multi-Word DMA + DbDMA functions
 */

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
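
/*
 * Build the DbDMA descriptor list for one request: walk the scatterlist
 * produced by ide_build_sglist(), split each segment into chunks of at most
 * 0xfe00 bytes, and queue each chunk on the tx (write) or rx (read) channel.
 * Only the last scatterlist segment is queued with DDMA_FLAGS_IE, so the
 * completion interrupt fires at the end of the transfer.  If more than
 * PRD_ENTRIES descriptors would be needed, the table is torn down and the
 * request falls back to PIO.
 */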
static int auide_build_dmatable(ide_drive_t *drive)
{
        int i, iswrite, count = 0;
        ide_hwif_t *hwif = HWIF(drive);
        struct request *rq = HWGROUP(drive)->rq;
        _auide_hwif *ahwif = &auide_hwif;
        struct scatterlist *sg;

        iswrite = (rq_data_dir(rq) == WRITE);
        /* Save for interrupt context */
        ahwif->drive = drive;

        hwif->sg_nents = i = ide_build_sglist(drive, rq);
        if (!i)
                return 0;

        /* fill the descriptors */
        sg = hwif->sg_table;
        while (i && sg_dma_len(sg)) {
                u32 cur_addr;
                u32 cur_len;

                cur_addr = sg_dma_address(sg);
                cur_len = sg_dma_len(sg);

                while (cur_len) {
                        u32 flags = DDMA_FLAGS_NOIE;
                        unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

                        if (++count >= PRD_ENTRIES) {
                                printk(KERN_WARNING "%s: DMA table too small\n",
                                       drive->name);
                                goto use_pio_instead;
                        }

                        /* Let's enable intr for the last descriptor only */
                        if (i == 1)
                                flags = DDMA_FLAGS_IE;
                        else
                                flags = DDMA_FLAGS_NOIE;

                        if (iswrite) {
                                if (!put_source_flags(ahwif->tx_chan,
                                                      (void *)sg_virt(sg),
                                                      tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __func__, __LINE__);
                                }
                        } else {
                                if (!put_dest_flags(ahwif->rx_chan,
                                                    (void *)sg_virt(sg),
                                                    tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __func__, __LINE__);
                                }
                        }

                        cur_addr += tc;
                        cur_len -= tc;
                }
                sg = sg_next(sg);
                i--;
        }

        if (count)
                return 1;

use_pio_instead:
        ide_destroy_dmatable(drive);

        return 0; /* revert to PIO for this request */
}
static int auide_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        if (hwif->sg_nents) {
                ide_destroy_dmatable(drive);
                hwif->sg_nents = 0;
        }

        return 0;
}

static void auide_dma_start(ide_drive_t *drive)
{
}

static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr,
                            (2 * WAIT_CMD), NULL);
}

static int auide_dma_setup(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;

        if (!auide_build_dmatable(drive)) {
                ide_map_sg(drive, rq);
                return 1;
        }

        drive->waiting_for_dma = 1;
        return 0;
}

static int auide_dma_test_irq(ide_drive_t *drive)
{
        /*
         * If dbdma didn't execute the STOP command yet, the
         * active bit is still set
         */
        drive->waiting_for_dma++;
        if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
                printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
                       drive->name);
                return 1;
        }
        udelay(10);
        return 0;
}

static void auide_dma_host_set(ide_drive_t *drive, int on)
{
}

static void auide_ddma_tx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;

        ahwif->drive->waiting_for_dma = 0;
}

static void auide_ddma_rx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;

        ahwif->drive->waiting_for_dma = 0;
}

#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
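
/*
 * Helper that fills in one DbDMA device table entry.  Every device used by
 * this driver shares the IDE controller's physical address (IDE_PHYS_ADDR)
 * and interrupt level/polarity 0; only the device id, transfer size, device
 * width and direction flags differ between the tx, rx and (in the MWDMA2
 * case) memory-target entries set up below.
 */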
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize,
                                 u32 devwidth, u32 flags)
{
        dev->dev_id = dev_id;
        dev->dev_physaddr = (u32)IDE_PHYS_ADDR;
        dev->dev_intlevel = 0;
        dev->dev_intpolarity = 0;
        dev->dev_tsize = tsize;
        dev->dev_devwidth = devwidth;
        dev->dev_flags = flags;
}
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA

static const struct ide_dma_ops au1xxx_dma_ops = {
        .dma_host_set   = auide_dma_host_set,
        .dma_setup      = auide_dma_setup,
        .dma_exec_cmd   = auide_dma_exec_cmd,
        .dma_start      = auide_dma_start,
        .dma_end        = auide_dma_end,
        .dma_test_irq   = auide_dma_test_irq,
        .dma_lost_irq   = ide_dma_lost_irq,
        .dma_timeout    = ide_dma_timeout,
};
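
/*
 * MWDMA2 setup: one DbDMA device is registered per direction (tx and rx,
 * both tied to IDE_DDMA_REQ) plus a DSCR_CMD0_ALWAYS memory target, then a
 * channel and a NUM_DESCRIPTORS-entry descriptor ring are allocated for each
 * direction.  The tx/rx completion callbacks above simply clear
 * drive->waiting_for_dma.
 */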
static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        _auide_hwif *auide = &auide_hwif;
        dbdev_tab_t source_dev_tab, target_dev_tab;
        u32 dev_id, tsize, devwidth, flags;

        dev_id = IDE_DDMA_REQ;
        tsize = 8;      /* 1 */
        devwidth = 32;  /* 16 */

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth,
                             DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth,
                             DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* We also need to add a target device for the DMA */
        auide_init_dbdma_dev(&target_dev_tab, (u32)DSCR_CMD0_ALWAYS, tsize,
                             devwidth, DEV_FLAGS_ANYUSE);
        auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
                                                 auide->tx_dev_id,
                                                 auide_ddma_tx_callback,
                                                 (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 auide->target_dev_id,
                                                 auide_ddma_rx_callback,
                                                 (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        /* FIXME: check return value */
        (void)ide_allocate_dma_engine(hwif);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#else

static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        _auide_hwif *auide = &auide_hwif;
        dbdev_tab_t source_dev_tab;
        int flags;

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS, 8, 32,
                             DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS, 8, 32,
                             DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
                                                 auide->tx_dev_id,
                                                 NULL, (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 DSCR_CMD0_ALWAYS,
                                                 NULL, (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#endif
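
/*
 * The Au1xxx IDE interface is memory mapped on the Static Bus: the eight
 * task-file registers live at regbase + (0..7 << IDE_REG_SHIFT) and the
 * Alternate Status register at regbase + (14 << IDE_REG_SHIFT), which is
 * what auide_setup_ports() fills into hw->io_ports_array.
 */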
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
        int i;
        unsigned long *ata_regs = hw->io_ports_array;

        /* FIXME? */
        for (i = 0; i < 8; i++)
                *ata_regs++ = ahwif->regbase + (i << IDE_REG_SHIFT);

        /* set the Alternative Status register */
        *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT);
}
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
static const struct ide_tp_ops au1xxx_tp_ops = {
        .exec_command           = ide_exec_command,
        .read_status            = ide_read_status,
        .read_altstatus         = ide_read_altstatus,
        .read_sff_dma_status    = ide_read_sff_dma_status,

        .set_irq                = ide_set_irq,

        .tf_load                = ide_tf_load,
        .tf_read                = ide_tf_read,

        .input_data             = au1xxx_input_data,
        .output_data            = au1xxx_output_data,
};
#endif

static const struct ide_port_ops au1xxx_port_ops = {
        .set_pio_mode           = au1xxx_set_pio_mode,
        .set_dma_mode           = auide_set_dma_mode,
};

static const struct ide_port_info au1xxx_port_info = {
        .init_dma               = auide_ddma_init,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
        .tp_ops                 = &au1xxx_tp_ops,
#endif
        .port_ops               = &au1xxx_port_ops,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        .dma_ops                = &au1xxx_dma_ops,
#endif
        .host_flags             = IDE_HFLAG_POST_SET_MODE |
                                  IDE_HFLAG_NO_IO_32BIT |
                                  IDE_HFLAG_UNMASK_IRQS,
        .pio_mask               = ATA_PIO4,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        .mwdma_mask             = ATA_MWDMA2,
#endif
};
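
/*
 * Platform probe: fetch the interrupt and MMIO resource from the platform
 * device, reserve and ioremap the register window, fill in a hw_regs_t via
 * auide_setup_ports(), and register the interface through ide_host_add()
 * using the port info above.  DMA (or PIO offload) setup then happens in
 * auide_ddma_init() via the .init_dma hook.
 */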
static int au_ide_probe(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        _auide_hwif *ahwif = &auide_hwif;
        struct resource *res;
        struct ide_host *host;
        int ret = 0;
        hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
        char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
        char *mode = "PIO+DDMA(offload)";
#endif

        memset(&auide_hwif, 0, sizeof(_auide_hwif));
        ahwif->irq = platform_get_irq(pdev, 0);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (res == NULL) {
                pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }
        if (ahwif->irq < 0) {
                pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }

        if (!request_mem_region(res->start, res->end - res->start + 1,
                                pdev->name)) {
                pr_debug("%s: request_mem_region failed\n", DRV_NAME);
                ret = -EBUSY;
                goto out;
        }

        ahwif->regbase = (u32)ioremap(res->start, res->end - res->start + 1);
        if (ahwif->regbase == 0) {
                ret = -ENOMEM;
                goto out;
        }

        memset(&hw, 0, sizeof(hw));
        auide_setup_ports(&hw, ahwif);
        hw.irq = ahwif->irq;
        hw.dev = dev;
        hw.chipset = ide_au1xxx;

        ret = ide_host_add(&au1xxx_port_info, hws, &host);
        if (ret)
                goto out;

        auide_hwif.hwif = host->ports[0];

        dev_set_drvdata(dev, host);

        printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode);

out:
        return ret;
}
static int au_ide_remove(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;
        struct ide_host *host = dev_get_drvdata(dev);
        _auide_hwif *ahwif = &auide_hwif;

        ide_host_remove(host);

        iounmap((void *)ahwif->regbase);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, res->end - res->start + 1);

        return 0;
}
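
/*
 * The driver is registered as a raw struct device_driver on the platform
 * bus via driver_register() rather than as a struct platform_driver; the
 * "au1200-ide" name must match the name of the platform device registered
 * elsewhere (typically in board setup code).
 */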
static struct device_driver au1200_ide_driver = {
        .name           = "au1200-ide",
        .bus            = &platform_bus_type,
        .probe          = au_ide_probe,
        .remove         = au_ide_remove,
};

static int __init au_ide_init(void)
{
        return driver_register(&au1200_ide_driver);
}

static void __exit au_ide_exit(void)
{
        driver_unregister(&au1200_ide_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);