au1xxx-ide.c

/*
 * BRIEF MODULE DESCRIPTION
 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
 *
 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
 * IDE Interface and Linux Device Driver" Application Note.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/sysdev.h>
#include <linux/dma-mapping.h>

#include "ide-timing.h"

#include <asm/io.h>
#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_ide.h>

#define DRV_NAME	"au1200-ide"
#define DRV_AUTHOR	"Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"

/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE	1
static _auide_hwif auide_hwif;
static int dbdma_init_done;

static int auide_ddma_init(_auide_hwif *auide);

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
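
/*
 * PIO transfers offloaded to DbDMA: these replace the generic insw/outsw
 * helpers; they queue the caller's buffer on the RX or TX DbDMA channel and
 * busy-wait until the current descriptor's valid bit clears.
 */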
void auide_insw(unsigned long port, void *addr, u32 count)
{
	_auide_hwif *ahwif = &auide_hwif;
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	if (!put_dest_flags(ahwif->rx_chan, (void *)addr, count << 1,
			    DDMA_FLAGS_NOIE)) {
		printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
		return;
	}
	ctp = *((chan_tab_t **)ahwif->rx_chan);
	dp = ctp->cur_ptr;
	while (dp->dscr_cmd0 & DSCR_CMD0_V)
		;
	ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

void auide_outsw(unsigned long port, void *addr, u32 count)
{
	_auide_hwif *ahwif = &auide_hwif;
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	if (!put_source_flags(ahwif->tx_chan, (void *)addr,
			      count << 1, DDMA_FLAGS_NOIE)) {
		printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
		return;
	}
	ctp = *((chan_tab_t **)ahwif->tx_chan);
	dp = ctp->cur_ptr;
	while (dp->dscr_cmd0 & DSCR_CMD0_V)
		;
	ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

#endif
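
/*
 * Program the Au1xxx static bus controller timing (MEM_STTIME2) and
 * configuration (MEM_STCFG2) registers on chip select RCS2# for the
 * requested PIO or Multi-Word DMA mode.
 */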
static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
	int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

	/* set pio mode! */
	switch (pio) {
	case 0:
		mem_sttime = SBC_IDE_TIMING(PIO0);

		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
		break;
	case 1:
		mem_sttime = SBC_IDE_TIMING(PIO1);

		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
		break;
	case 2:
		mem_sttime = SBC_IDE_TIMING(PIO2);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
		break;
	case 3:
		mem_sttime = SBC_IDE_TIMING(PIO3);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
		break;
	case 4:
		mem_sttime = SBC_IDE_TIMING(PIO4);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
		break;
	}

	au_writel(mem_sttime, MEM_STTIME2);
	au_writel(mem_stcfg, MEM_STCFG2);
}
static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
	int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

	switch (speed) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	case XFER_MW_DMA_2:
		mem_sttime = SBC_IDE_TIMING(MDMA2);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
		break;
	case XFER_MW_DMA_1:
		mem_sttime = SBC_IDE_TIMING(MDMA1);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
		break;
	case XFER_MW_DMA_0:
		mem_sttime = SBC_IDE_TIMING(MDMA0);

		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
		break;
#endif
	}

	au_writel(mem_sttime, MEM_STTIME2);
	au_writel(mem_stcfg, MEM_STCFG2);
}
/*
 * Multi-Word DMA + DbDMA functions
 */

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
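
/*
 * Walk the request's scatter/gather list and queue one DbDMA descriptor per
 * chunk (capped at 0xfe00 bytes), enabling the completion interrupt only on
 * the last descriptor.  Returns 1 on success, 0 to fall back to PIO.
 */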
static int auide_build_dmatable(ide_drive_t *drive)
{
	int i, iswrite, count = 0;
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq = HWGROUP(drive)->rq;
	_auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
	struct scatterlist *sg;

	iswrite = (rq_data_dir(rq) == WRITE);
	/* Save for interrupt context */
	ahwif->drive = drive;

	hwif->sg_nents = i = ide_build_sglist(drive, rq);
	if (!i)
		return 0;

	/* fill the descriptors */
	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			u32 flags = DDMA_FLAGS_NOIE;
			unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

			if (++count >= PRD_ENTRIES) {
				printk(KERN_WARNING "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			}

			/* Lets enable intr for the last descriptor only */
			if (i == 1)
				flags = DDMA_FLAGS_IE;
			else
				flags = DDMA_FLAGS_NOIE;

			if (iswrite) {
				if (!put_source_flags(ahwif->tx_chan,
						      (void *)sg_virt(sg),
						      tc, flags)) {
					printk(KERN_ERR "%s failed %d\n",
					       __FUNCTION__, __LINE__);
				}
			} else {
				if (!put_dest_flags(ahwif->rx_chan,
						    (void *)sg_virt(sg),
						    tc, flags)) {
					printk(KERN_ERR "%s failed %d\n",
					       __FUNCTION__, __LINE__);
				}
			}

			cur_addr += tc;
			cur_len -= tc;
		}
		sg = sg_next(sg);
		i--;
	}

	if (count)
		return 1;

use_pio_instead:
	ide_destroy_dmatable(drive);

	return 0; /* revert to PIO for this request */
}
static int auide_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	if (hwif->sg_nents) {
		ide_destroy_dmatable(drive);
		hwif->sg_nents = 0;
	}

	return 0;
}

static void auide_dma_start(ide_drive_t *drive)
{
}

static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr,
			    (2 * WAIT_CMD), NULL);
}

static int auide_dma_setup(ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;

	if (!auide_build_dmatable(drive)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	drive->waiting_for_dma = 1;
	return 0;
}
static u8 auide_mdma_filter(ide_drive_t *drive)
{
	/*
	 * FIXME: ->white_list and ->black_list are based on a completely bogus
	 * ->ide_dma_check implementation which set neither the host controller
	 * timings nor the device for the desired transfer mode.
	 *
	 * They should either be removed or a 0x00 MWDMA mask should be
	 * returned for devices on the ->black_list.
	 */

	if (dbdma_init_done == 0) {
		auide_hwif.white_list = ide_in_drive_list(drive->id,
							  dma_white_list);
		auide_hwif.black_list = ide_in_drive_list(drive->id,
							  dma_black_list);
		auide_hwif.drive = drive;
		auide_ddma_init(&auide_hwif);
		dbdma_init_done = 1;
	}

	/* Is the drive in our DMA black list? */
	if (auide_hwif.black_list)
		printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
		       drive->name, drive->id->model);

	return drive->hwif->mwdma_mask;
}
static int auide_dma_test_irq(ide_drive_t *drive)
{
	if (drive->waiting_for_dma == 0)
		printk(KERN_WARNING "%s: ide_dma_test_irq called while not waiting\n",
		       drive->name);

	/* If dbdma didn't execute the STOP command yet, the
	 * active bit is still set
	 */
	drive->waiting_for_dma++;
	if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
		printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
		       drive->name);
		return 1;
	}
	udelay(10);
	return 0;
}

static void auide_dma_host_set(ide_drive_t *drive, int on)
{
}

static void auide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}

static void auide_ddma_tx_callback(int irq, void *param)
{
	_auide_hwif *ahwif = (_auide_hwif *)param;

	ahwif->drive->waiting_for_dma = 0;
}

static void auide_ddma_rx_callback(int irq, void *param)
{
	_auide_hwif *ahwif = (_auide_hwif *)param;

	ahwif->drive->waiting_for_dma = 0;
}

#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
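
/*
 * Fill a dbdev_tab_t describing the ATA interface (physical address,
 * transfer size, bus width and flags) for registration with the DbDMA
 * controller.
 */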
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize,
				 u32 devwidth, u32 flags)
{
	dev->dev_id          = dev_id;
	dev->dev_physaddr    = (u32)AU1XXX_ATA_PHYS_ADDR;
	dev->dev_intlevel    = 0;
	dev->dev_intpolarity = 0;
	dev->dev_tsize       = tsize;
	dev->dev_devwidth    = devwidth;
	dev->dev_flags       = flags;
}
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)

static void auide_dma_timeout(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

	if (hwif->ide_dma_test_irq(drive))
		return;

	hwif->ide_dma_end(drive);
}
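
/*
 * MWDMA flavour of the DbDMA setup: choose burst-capable parameters for
 * whitelisted drives, register the source/target pseudo-devices, allocate
 * TX/RX channels and their descriptor rings, then start both channels.
 */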
static int auide_ddma_init(_auide_hwif *auide)
{
	dbdev_tab_t source_dev_tab, target_dev_tab;
	u32 dev_id, tsize, devwidth, flags;
	ide_hwif_t *hwif = auide->hwif;

	dev_id = AU1XXX_ATA_DDMA_REQ;

	if (auide->white_list || auide->black_list) {
		tsize = 8;
		devwidth = 32;
	} else {
		tsize = 1;
		devwidth = 16;

		printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",
		       auide_hwif.drive->id->model);
		printk(KERN_ERR " please read 'Documentation/mips/AU1xxx_IDE.README'\n");
	}

#ifdef IDE_AU1XXX_BURSTMODE
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

	/* setup dev_tab for tx channel */
	auide_init_dbdma_dev(&source_dev_tab, dev_id,
			     tsize, devwidth, DEV_FLAGS_OUT | flags);
	auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	auide_init_dbdma_dev(&source_dev_tab, dev_id,
			     tsize, devwidth, DEV_FLAGS_IN | flags);
	auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	/* We also need to add a target device for the DMA */
	auide_init_dbdma_dev(&target_dev_tab, (u32)DSCR_CMD0_ALWAYS,
			     tsize, devwidth, DEV_FLAGS_ANYUSE);
	auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
						 auide->tx_dev_id,
						 auide_ddma_tx_callback,
						 (void *)auide);

	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 auide->target_dev_id,
						 auide_ddma_rx_callback,
						 (void *)auide);

	auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							      NUM_DESCRIPTORS);
	auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							      NUM_DESCRIPTORS);

	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
						PRD_ENTRIES * PRD_BYTES, /* 1 Page */
						&hwif->dmatable_dma, GFP_KERNEL);

	au1xxx_dbdma_start(auide->tx_chan);
	au1xxx_dbdma_start(auide->rx_chan);

	return 0;
}
#else

static int auide_ddma_init(_auide_hwif *auide)
{
	dbdev_tab_t source_dev_tab;
	int flags;

#ifdef IDE_AU1XXX_BURSTMODE
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

	/* setup dev_tab for tx channel */
	auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS,
			     8, 32, DEV_FLAGS_OUT | flags);
	auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS,
			     8, 32, DEV_FLAGS_IN | flags);
	auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
						 auide->tx_dev_id,
						 NULL, (void *)auide);

	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 DSCR_CMD0_ALWAYS,
						 NULL, (void *)auide);

	auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							      NUM_DESCRIPTORS);
	auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							      NUM_DESCRIPTORS);

	au1xxx_dbdma_start(auide->tx_chan);
	au1xxx_dbdma_start(auide->rx_chan);

	return 0;
}
#endif
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
	int i;
	unsigned long *ata_regs = hw->io_ports;

	/* FIXME? */
	for (i = 0; i < IDE_CONTROL_OFFSET; i++)
		*ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);

	/* set the Alternative Status register */
	*ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
}
static const struct ide_port_info au1xxx_port_info = {
	.host_flags		= IDE_HFLAG_POST_SET_MODE |
				  IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
				  IDE_HFLAG_NO_IO_32BIT |
				  IDE_HFLAG_UNMASK_IRQS,
	.pio_mask		= ATA_PIO4,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	.mwdma_mask		= ATA_MWDMA2,
#endif
};
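
/*
 * Platform bus probe: map the static bus ATA region, fill in the hw_regs_t,
 * hook up the PIO/DMA methods selected by the Kconfig options above and
 * register the interface with the IDE core.
 */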
static int au_ide_probe(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	_auide_hwif *ahwif = &auide_hwif;
	ide_hwif_t *hwif;
	struct resource *res;
	int ret = 0;
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
	hw_regs_t hw;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
	char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
	char *mode = "PIO+DDMA(offload)";
#endif

	memset(&auide_hwif, 0, sizeof(_auide_hwif));
	ahwif->irq = platform_get_irq(pdev, 0);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
		ret = -ENODEV;
		goto out;
	}
	if (ahwif->irq < 0) {
		pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
		ret = -ENODEV;
		goto out;
	}

	if (!request_mem_region(res->start, res->end - res->start, pdev->name)) {
		pr_debug("%s: request_mem_region failed\n", DRV_NAME);
		ret = -EBUSY;
		goto out;
	}

	ahwif->regbase = (u32)ioremap(res->start, res->end - res->start);
	if (ahwif->regbase == 0) {
		ret = -ENOMEM;
		goto out;
	}

	/* FIXME: This might possibly break PCMCIA IDE devices */
	hwif = &ide_hwifs[pdev->id];

	memset(&hw, 0, sizeof(hw));
	auide_setup_ports(&hw, ahwif);
	hw.irq = ahwif->irq;
	hw.dev = dev;
	hw.chipset = ide_au1xxx;

	ide_init_port_hw(hwif, &hw);

	hwif->dev = dev;

	/* hold should be on in all cases */
	hwif->hold = 1;

	hwif->mmio = 1;

	/* If the user has selected DDMA assisted copies,
	   then set up a few local I/O function entry points
	 */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
	hwif->INSW		= auide_insw;
	hwif->OUTSW		= auide_outsw;
#endif
	hwif->set_pio_mode	= &au1xxx_set_pio_mode;
	hwif->set_dma_mode	= &auide_set_dma_mode;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	hwif->dma_timeout	= &auide_dma_timeout;
	hwif->mdma_filter	= &auide_mdma_filter;
	hwif->dma_host_set	= &auide_dma_host_set;
	hwif->dma_exec_cmd	= &auide_dma_exec_cmd;
	hwif->dma_start		= &auide_dma_start;
	hwif->ide_dma_end	= &auide_dma_end;
	hwif->dma_setup		= &auide_dma_setup;
	hwif->ide_dma_test_irq	= &auide_dma_test_irq;
	hwif->dma_lost_irq	= &auide_dma_lost_irq;
#endif
	hwif->select_data	= 0;	/* no chipset-specific code */
	hwif->config_data	= 0;	/* no chipset-specific code */

	auide_hwif.hwif		= hwif;
	hwif->hwif_data		= &auide_hwif;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
	auide_ddma_init(&auide_hwif);
	dbdma_init_done = 1;
#endif

	idx[0] = hwif->index;

	ide_device_add(idx, &au1xxx_port_info);

	dev_set_drvdata(dev, hwif);

	printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode);

out:
	return ret;
}
static int au_ide_remove(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	ide_hwif_t *hwif = dev_get_drvdata(dev);
	_auide_hwif *ahwif = &auide_hwif;

	ide_unregister(hwif->index, 0, 0);

	iounmap((void *)ahwif->regbase);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start);

	return 0;
}
static struct device_driver au1200_ide_driver = {
	.name		= "au1200-ide",
	.bus		= &platform_bus_type,
	.probe		= au_ide_probe,
	.remove		= au_ide_remove,
};

static int __init au_ide_init(void)
{
	return driver_register(&au1200_ide_driver);
}

static void __exit au_ide_exit(void)
{
	driver_unregister(&au1200_ide_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);