au1xxx-ide.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773
  1. /*
  2. * linux/drivers/ide/mips/au1xxx-ide.c version 01.30.00 Aug. 02 2005
  3. *
  4. * BRIEF MODULE DESCRIPTION
  5. * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
  6. *
  7. * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
  8. *
  9. * This program is free software; you can redistribute it and/or modify it under
  10. * the terms of the GNU General Public License as published by the Free Software
  11. * Foundation; either version 2 of the License, or (at your option) any later
  12. * version.
  13. *
  14. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
  15. * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  16. * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
  17. * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  18. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  19. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  20. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  21. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  22. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  23. * POSSIBILITY OF SUCH DAMAGE.
  24. *
  25. * You should have received a copy of the GNU General Public License along with
  26. * this program; if not, write to the Free Software Foundation, Inc.,
  27. * 675 Mass Ave, Cambridge, MA 02139, USA.
  28. *
  29. * Note: for more information, please refer "AMD Alchemy Au1200/Au1550 IDE
  30. * Interface and Linux Device Driver" Application Note.
  31. */
  32. #include <linux/types.h>
  33. #include <linux/module.h>
  34. #include <linux/kernel.h>
  35. #include <linux/delay.h>
  36. #include <linux/platform_device.h>
  37. #include <linux/init.h>
  38. #include <linux/ide.h>
  39. #include <linux/sysdev.h>
  40. #include <linux/dma-mapping.h>
  41. #include "ide-timing.h"
  42. #include <asm/io.h>
  43. #include <asm/mach-au1x00/au1xxx.h>
  44. #include <asm/mach-au1x00/au1xxx_dbdma.h>
  45. #include <asm/mach-au1x00/au1xxx_ide.h>
  46. #define DRV_NAME "au1200-ide"
  47. #define DRV_VERSION "1.0"
  48. #define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"
  49. /* enable the burstmode in the dbdma */
  50. #define IDE_AU1XXX_BURSTMODE 1
/* Driver-global controller state: the Au1xxx has a single on-chip IDE
 * interface, so one static instance is shared by all hooks. */
static _auide_hwif auide_hwif;
/* Non-zero once auide_ddma_init() has run; guards one-time DBDMA setup. */
static int dbdma_init_done;
  53. #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
  54. void auide_insw(unsigned long port, void *addr, u32 count)
  55. {
  56. _auide_hwif *ahwif = &auide_hwif;
  57. chan_tab_t *ctp;
  58. au1x_ddma_desc_t *dp;
  59. if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,
  60. DDMA_FLAGS_NOIE)) {
  61. printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
  62. return;
  63. }
  64. ctp = *((chan_tab_t **)ahwif->rx_chan);
  65. dp = ctp->cur_ptr;
  66. while (dp->dscr_cmd0 & DSCR_CMD0_V)
  67. ;
  68. ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
  69. }
  70. void auide_outsw(unsigned long port, void *addr, u32 count)
  71. {
  72. _auide_hwif *ahwif = &auide_hwif;
  73. chan_tab_t *ctp;
  74. au1x_ddma_desc_t *dp;
  75. if(!put_source_flags(ahwif->tx_chan, (void*)addr,
  76. count << 1, DDMA_FLAGS_NOIE)) {
  77. printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
  78. return;
  79. }
  80. ctp = *((chan_tab_t **)ahwif->tx_chan);
  81. dp = ctp->cur_ptr;
  82. while (dp->dscr_cmd0 & DSCR_CMD0_V)
  83. ;
  84. ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
  85. }
  86. #endif
  87. static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
  88. {
  89. int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
  90. /* set pio mode! */
  91. switch(pio) {
  92. case 0:
  93. mem_sttime = SBC_IDE_TIMING(PIO0);
  94. /* set configuration for RCS2# */
  95. mem_stcfg |= TS_MASK;
  96. mem_stcfg &= ~TCSOE_MASK;
  97. mem_stcfg &= ~TOECS_MASK;
  98. mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
  99. break;
  100. case 1:
  101. mem_sttime = SBC_IDE_TIMING(PIO1);
  102. /* set configuration for RCS2# */
  103. mem_stcfg |= TS_MASK;
  104. mem_stcfg &= ~TCSOE_MASK;
  105. mem_stcfg &= ~TOECS_MASK;
  106. mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
  107. break;
  108. case 2:
  109. mem_sttime = SBC_IDE_TIMING(PIO2);
  110. /* set configuration for RCS2# */
  111. mem_stcfg &= ~TS_MASK;
  112. mem_stcfg &= ~TCSOE_MASK;
  113. mem_stcfg &= ~TOECS_MASK;
  114. mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
  115. break;
  116. case 3:
  117. mem_sttime = SBC_IDE_TIMING(PIO3);
  118. /* set configuration for RCS2# */
  119. mem_stcfg &= ~TS_MASK;
  120. mem_stcfg &= ~TCSOE_MASK;
  121. mem_stcfg &= ~TOECS_MASK;
  122. mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
  123. break;
  124. case 4:
  125. mem_sttime = SBC_IDE_TIMING(PIO4);
  126. /* set configuration for RCS2# */
  127. mem_stcfg &= ~TS_MASK;
  128. mem_stcfg &= ~TCSOE_MASK;
  129. mem_stcfg &= ~TOECS_MASK;
  130. mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
  131. break;
  132. }
  133. au_writel(mem_sttime,MEM_STTIME2);
  134. au_writel(mem_stcfg,MEM_STCFG2);
  135. }
  136. static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
  137. {
  138. int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
  139. switch(speed) {
  140. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
  141. case XFER_MW_DMA_2:
  142. mem_sttime = SBC_IDE_TIMING(MDMA2);
  143. /* set configuration for RCS2# */
  144. mem_stcfg &= ~TS_MASK;
  145. mem_stcfg &= ~TCSOE_MASK;
  146. mem_stcfg &= ~TOECS_MASK;
  147. mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
  148. break;
  149. case XFER_MW_DMA_1:
  150. mem_sttime = SBC_IDE_TIMING(MDMA1);
  151. /* set configuration for RCS2# */
  152. mem_stcfg &= ~TS_MASK;
  153. mem_stcfg &= ~TCSOE_MASK;
  154. mem_stcfg &= ~TOECS_MASK;
  155. mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
  156. break;
  157. case XFER_MW_DMA_0:
  158. mem_sttime = SBC_IDE_TIMING(MDMA0);
  159. /* set configuration for RCS2# */
  160. mem_stcfg |= TS_MASK;
  161. mem_stcfg &= ~TCSOE_MASK;
  162. mem_stcfg &= ~TOECS_MASK;
  163. mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
  164. break;
  165. #endif
  166. default:
  167. return;
  168. }
  169. au_writel(mem_sttime,MEM_STTIME2);
  170. au_writel(mem_stcfg,MEM_STCFG2);
  171. }
  172. /*
  173. * Multi-Word DMA + DbDMA functions
  174. */
  175. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
  176. static int auide_build_sglist(ide_drive_t *drive, struct request *rq)
  177. {
  178. ide_hwif_t *hwif = drive->hwif;
  179. _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
  180. struct scatterlist *sg = hwif->sg_table;
  181. ide_map_sg(drive, rq);
  182. if (rq_data_dir(rq) == READ)
  183. hwif->sg_dma_direction = DMA_FROM_DEVICE;
  184. else
  185. hwif->sg_dma_direction = DMA_TO_DEVICE;
  186. return dma_map_sg(ahwif->dev, sg, hwif->sg_nents,
  187. hwif->sg_dma_direction);
  188. }
/*
 * Build the DBDMA transfer for the current request: map the request's
 * scatterlist, then queue one DBDMA descriptor per chunk of at most
 * 0xfe00 bytes on the tx (write) or rx (read) channel.  Only the last
 * descriptor raises an interrupt.  Returns 1 on success, 0 to make the
 * caller fall back to PIO (scatterlist already unmapped in that case).
 */
static int auide_build_dmatable(ide_drive_t *drive)
{
	int i, iswrite, count = 0;
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq = HWGROUP(drive)->rq;
	_auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
	struct scatterlist *sg;

	iswrite = (rq_data_dir(rq) == WRITE);
	/* Save for interrupt context */
	ahwif->drive = drive;

	/* Build sglist */
	hwif->sg_nents = i = auide_build_sglist(drive, rq);

	if (!i)
		return 0;

	/* fill the descriptors */
	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			/* NOTE(review): dead store -- flags is
			 * unconditionally reassigned below. */
			u32 flags = DDMA_FLAGS_NOIE;
			/* DBDMA hardware limit per descriptor */
			unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

			if (++count >= PRD_ENTRIES) {
				printk(KERN_WARNING "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			}

			/* Lets enable intr for the last descriptor only */
			if (1 == i)
				flags = DDMA_FLAGS_IE;
			else
				flags = DDMA_FLAGS_NOIE;

			/* NOTE(review): only cur_addr advances per chunk; the
			 * virtual address passed to put_*_flags() below does
			 * not, so a segment larger than 0xfe00 bytes would
			 * queue the same start address repeatedly.  Verify
			 * that segments never exceed one chunk. */
			if (iswrite) {
				if (!put_source_flags(ahwif->tx_chan,
						      (void *)(page_address(sg->page)
							       + sg->offset),
						      tc, flags)) {
					printk(KERN_ERR "%s failed %d\n",
					       __FUNCTION__, __LINE__);
				}
			} else {
				if (!put_dest_flags(ahwif->rx_chan,
						    (void *)(page_address(sg->page)
							     + sg->offset),
						    tc, flags)) {
					printk(KERN_ERR "%s failed %d\n",
					       __FUNCTION__, __LINE__);
				}
			}

			cur_addr += tc;
			cur_len -= tc;
		}
		sg = sg_next(sg);
		i--;
	}

	if (count)
		return 1;

use_pio_instead:
	/* undo the mapping done in auide_build_sglist() */
	dma_unmap_sg(ahwif->dev,
		     hwif->sg_table,
		     hwif->sg_nents,
		     hwif->sg_dma_direction);

	return 0; /* revert to PIO for this request */
}
  256. static int auide_dma_end(ide_drive_t *drive)
  257. {
  258. ide_hwif_t *hwif = HWIF(drive);
  259. _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
  260. if (hwif->sg_nents) {
  261. dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents,
  262. hwif->sg_dma_direction);
  263. hwif->sg_nents = 0;
  264. }
  265. return 0;
  266. }
/* Intentionally empty: the DBDMA channels are started once at init time
 * (see auide_ddma_init()) and descriptors queued in auide_build_dmatable()
 * begin transferring immediately, so no per-request kick is required. */
static void auide_dma_start(ide_drive_t *drive )
{
}
  270. static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
  271. {
  272. /* issue cmd to drive */
  273. ide_execute_command(drive, command, &ide_dma_intr,
  274. (2*WAIT_CMD), NULL);
  275. }
  276. static int auide_dma_setup(ide_drive_t *drive)
  277. {
  278. struct request *rq = HWGROUP(drive)->rq;
  279. if (!auide_build_dmatable(drive)) {
  280. ide_map_sg(drive, rq);
  281. return 1;
  282. }
  283. drive->waiting_for_dma = 1;
  284. return 0;
  285. }
/*
 * MWDMA mode filter.  Also (ab)used as the hook where one-time DBDMA
 * initialisation happens for the MDMA2 build: the first drive probed
 * triggers the white/black list lookup and auide_ddma_init().
 * Returns the host's MWDMA mask unmodified.
 */
static u8 auide_mdma_filter(ide_drive_t *drive)
{
	/*
	 * FIXME: ->white_list and ->black_list are based on completely bogus
	 * ->ide_dma_check implementation which didn't set neither the host
	 * controller timings nor the device for the desired transfer mode.
	 *
	 * They should be either removed or 0x00 MWDMA mask should be
	 * returned for devices on the ->black_list.
	 */

	/* one-time DBDMA setup, keyed off the first filtered drive */
	if (dbdma_init_done == 0) {
		auide_hwif.white_list = ide_in_drive_list(drive->id,
							  dma_white_list);
		auide_hwif.black_list = ide_in_drive_list(drive->id,
							  dma_black_list);
		auide_hwif.drive = drive;

		/* NOTE(review): return value of auide_ddma_init() is ignored */
		auide_ddma_init(&auide_hwif);
		dbdma_init_done = 1;
	}

	/* Is the drive in our DMA black list? */
	if (auide_hwif.black_list)
		printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
		       drive->name, drive->id->model);

	return drive->hwif->mwdma_mask;
}
  311. static int auide_dma_test_irq(ide_drive_t *drive)
  312. {
  313. if (drive->waiting_for_dma == 0)
  314. printk(KERN_WARNING "%s: ide_dma_test_irq \
  315. called while not waiting\n", drive->name);
  316. /* If dbdma didn't execute the STOP command yet, the
  317. * active bit is still set
  318. */
  319. drive->waiting_for_dma++;
  320. if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
  321. printk(KERN_WARNING "%s: timeout waiting for ddma to \
  322. complete\n", drive->name);
  323. return 1;
  324. }
  325. udelay(10);
  326. return 0;
  327. }
/* Intentionally empty: the Au1xxx DBDMA engine has no per-host DMA
 * enable bit to set, so this hook is a no-op stub. */
static void auide_dma_host_on(ide_drive_t *drive)
{
}
  331. static int auide_dma_on(ide_drive_t *drive)
  332. {
  333. drive->using_dma = 1;
  334. return 0;
  335. }
/* Intentionally empty: the Au1xxx DBDMA engine has no per-host DMA
 * disable bit to clear, so this hook is a no-op stub. */
static void auide_dma_host_off(ide_drive_t *drive)
{
}
  339. static void auide_dma_off_quietly(ide_drive_t *drive)
  340. {
  341. drive->using_dma = 0;
  342. }
  343. static void auide_dma_lost_irq(ide_drive_t *drive)
  344. {
  345. printk(KERN_ERR "%s: IRQ lost\n", drive->name);
  346. }
  347. static void auide_ddma_tx_callback(int irq, void *param)
  348. {
  349. _auide_hwif *ahwif = (_auide_hwif*)param;
  350. ahwif->drive->waiting_for_dma = 0;
  351. }
  352. static void auide_ddma_rx_callback(int irq, void *param)
  353. {
  354. _auide_hwif *ahwif = (_auide_hwif*)param;
  355. ahwif->drive->waiting_for_dma = 0;
  356. }
  357. #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
  358. static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags)
  359. {
  360. dev->dev_id = dev_id;
  361. dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
  362. dev->dev_intlevel = 0;
  363. dev->dev_intpolarity = 0;
  364. dev->dev_tsize = tsize;
  365. dev->dev_devwidth = devwidth;
  366. dev->dev_flags = flags;
  367. }
  368. #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
  369. static void auide_dma_timeout(ide_drive_t *drive)
  370. {
  371. ide_hwif_t *hwif = HWIF(drive);
  372. printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
  373. if (hwif->ide_dma_test_irq(drive))
  374. return;
  375. hwif->ide_dma_end(drive);
  376. }
  377. static int auide_ddma_init(_auide_hwif *auide) {
  378. dbdev_tab_t source_dev_tab, target_dev_tab;
  379. u32 dev_id, tsize, devwidth, flags;
  380. ide_hwif_t *hwif = auide->hwif;
  381. dev_id = AU1XXX_ATA_DDMA_REQ;
  382. if (auide->white_list || auide->black_list) {
  383. tsize = 8;
  384. devwidth = 32;
  385. }
  386. else {
  387. tsize = 1;
  388. devwidth = 16;
  389. printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model);
  390. printk(KERN_ERR " please read 'Documentation/mips/AU1xxx_IDE.README'");
  391. }
  392. #ifdef IDE_AU1XXX_BURSTMODE
  393. flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
  394. #else
  395. flags = DEV_FLAGS_SYNC;
  396. #endif
  397. /* setup dev_tab for tx channel */
  398. auide_init_dbdma_dev( &source_dev_tab,
  399. dev_id,
  400. tsize, devwidth, DEV_FLAGS_OUT | flags);
  401. auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
  402. auide_init_dbdma_dev( &source_dev_tab,
  403. dev_id,
  404. tsize, devwidth, DEV_FLAGS_IN | flags);
  405. auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
  406. /* We also need to add a target device for the DMA */
  407. auide_init_dbdma_dev( &target_dev_tab,
  408. (u32)DSCR_CMD0_ALWAYS,
  409. tsize, devwidth, DEV_FLAGS_ANYUSE);
  410. auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);
  411. /* Get a channel for TX */
  412. auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
  413. auide->tx_dev_id,
  414. auide_ddma_tx_callback,
  415. (void*)auide);
  416. /* Get a channel for RX */
  417. auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
  418. auide->target_dev_id,
  419. auide_ddma_rx_callback,
  420. (void*)auide);
  421. auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
  422. NUM_DESCRIPTORS);
  423. auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
  424. NUM_DESCRIPTORS);
  425. hwif->dmatable_cpu = dma_alloc_coherent(auide->dev,
  426. PRD_ENTRIES * PRD_BYTES, /* 1 Page */
  427. &hwif->dmatable_dma, GFP_KERNEL);
  428. au1xxx_dbdma_start( auide->tx_chan );
  429. au1xxx_dbdma_start( auide->rx_chan );
  430. return 0;
  431. }
  432. #else
/*
 * DBDMA setup for the PIO-offload build (no MWDMA): register generic
 * "always" source/dest devices with a fixed 8-burst/32-bit width,
 * allocate tx/rx channels (no completion callbacks -- auide_insw/outsw
 * poll the descriptors instead) and their rings, then start both
 * channels.  Always returns 0; allocation failures are not checked.
 */
static int auide_ddma_init( _auide_hwif *auide )
{
	dbdev_tab_t source_dev_tab;
	int flags;

#ifdef IDE_AU1XXX_BURSTMODE
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

	/* setup dev_tab for tx channel */
	auide_init_dbdma_dev( &source_dev_tab,
			      (u32)DSCR_CMD0_ALWAYS,
			      8, 32, DEV_FLAGS_OUT | flags);
	auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

	auide_init_dbdma_dev( &source_dev_tab,
			      (u32)DSCR_CMD0_ALWAYS,
			      8, 32, DEV_FLAGS_IN | flags);
	auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
						 auide->tx_dev_id,
						 NULL,
						 (void*)auide);

	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 DSCR_CMD0_ALWAYS,
						 NULL,
						 (void*)auide);

	auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							     NUM_DESCRIPTORS);
	auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							     NUM_DESCRIPTORS);

	au1xxx_dbdma_start( auide->tx_chan );
	au1xxx_dbdma_start( auide->rx_chan );

	return 0;
}
  469. #endif
  470. static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
  471. {
  472. int i;
  473. unsigned long *ata_regs = hw->io_ports;
  474. /* FIXME? */
  475. for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
  476. *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
  477. }
  478. /* set the Alternative Status register */
  479. *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
  480. }
/*
 * Platform-bus probe: claim and map the controller's MMIO region, fill
 * in the global ide_hwifs[] slot for this device (ops, DMA/PIO masks,
 * I/O ports) and register it with the IDE core.  Returns 0 on success
 * or a negative errno.
 */
static int au_ide_probe(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	_auide_hwif *ahwif = &auide_hwif;
	ide_hwif_t *hwif;
	struct resource *res;
	int ret = 0;
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
	hw_regs_t hw;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
	char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
	char *mode = "PIO+DDMA(offload)";
#endif
	/* NOTE(review): if neither config option above is set, "mode" is
	 * undeclared and the final printk will not compile -- presumably
	 * Kconfig guarantees one of them; verify. */

	memset(&auide_hwif, 0, sizeof(_auide_hwif));
	/* NOTE(review): dead store -- redundant after the memset and
	 * overwritten immediately below. */
	auide_hwif.dev                  = 0;

	ahwif->dev = dev;
	ahwif->irq = platform_get_irq(pdev, 0);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (res == NULL) {
		pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
		ret = -ENODEV;
		goto out;
	}
	if (ahwif->irq < 0) {
		pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
		ret = -ENODEV;
		goto out;
	}

	/* NOTE(review): res->end - res->start is one byte short of the
	 * real size (resource ranges are inclusive); au_ide_remove()
	 * releases with the same expression, so any fix must change both
	 * together. */
	if (!request_mem_region (res->start, res->end-res->start, pdev->name)) {
		pr_debug("%s: request_mem_region failed\n", DRV_NAME);
		ret =  -EBUSY;
		goto out;
	}

	ahwif->regbase = (u32)ioremap(res->start, res->end-res->start);
	if (ahwif->regbase == 0) {
		ret = -ENOMEM;
		goto out;
	}

	/* FIXME: This might possibly break PCMCIA IDE devices */

	hwif = &ide_hwifs[pdev->id];
	hwif->irq = ahwif->irq;
	hwif->chipset = ide_au1xxx;

	memset(&hw, 0, sizeof(hw));
	auide_setup_ports(&hw, ahwif);
	memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));

	hwif->ultra_mask = 0x0;  /* Disable Ultra DMA */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	hwif->mwdma_mask = 0x07; /* Multimode-2 DMA  */
	hwif->swdma_mask = 0x00;
#else
	hwif->mwdma_mask = 0x0;
	hwif->swdma_mask = 0x0;
#endif

	hwif->pio_mask = ATA_PIO4;
	hwif->host_flags = IDE_HFLAG_POST_SET_MODE;

	hwif->noprobe = 0;
	hwif->drives[0].unmask = 1;
	hwif->drives[1].unmask = 1;

	/* hold should be on in all cases */
	hwif->hold = 1;

	hwif->mmio = 1;

	/* If the user has selected DDMA assisted copies,
	   then set up a few local I/O function entry points
	 */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
	hwif->INSW = auide_insw;
	hwif->OUTSW = auide_outsw;
#endif

	hwif->set_pio_mode = &au1xxx_set_pio_mode;
	hwif->set_dma_mode = &auide_set_dma_mode;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	hwif->dma_off_quietly = &auide_dma_off_quietly;
	hwif->dma_timeout = &auide_dma_timeout;

	hwif->mdma_filter = &auide_mdma_filter;

	hwif->dma_exec_cmd = &auide_dma_exec_cmd;
	hwif->dma_start = &auide_dma_start;
	hwif->ide_dma_end = &auide_dma_end;
	hwif->dma_setup = &auide_dma_setup;
	hwif->ide_dma_test_irq = &auide_dma_test_irq;
	hwif->dma_host_off = &auide_dma_host_off;
	hwif->dma_host_on = &auide_dma_host_on;
	hwif->dma_lost_irq = &auide_dma_lost_irq;
	hwif->ide_dma_on = &auide_dma_on;
#else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
	hwif->channel = 0;
	hwif->hold = 1;
	hwif->select_data = 0;    /* no chipset-specific code */
	hwif->config_data = 0;    /* no chipset-specific code */

	hwif->drives[0].autotune = 1;   /* 1=autotune, 2=noautotune, 0=default */
	hwif->drives[1].autotune = 1;
#endif
	hwif->drives[0].no_io_32bit = 1;
	hwif->drives[1].no_io_32bit = 1;

	auide_hwif.hwif = hwif;
	hwif->hwif_data = &auide_hwif;

	/* for the PIO-offload build, DBDMA setup happens here; for the
	 * MDMA2 build it is deferred to auide_mdma_filter() */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
	auide_ddma_init(&auide_hwif);
	dbdma_init_done = 1;
#endif

	idx[0] = hwif->index;

	ide_device_add(idx);

	dev_set_drvdata(dev, hwif);

	printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );

 out:
	return ret;
}
/*
 * Platform-bus remove: unregister the hwif from the IDE core, unmap the
 * controller registers and release the MMIO region claimed in
 * au_ide_probe().
 */
static int au_ide_remove(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	ide_hwif_t *hwif = dev_get_drvdata(dev);
	_auide_hwif *ahwif = &auide_hwif;

	/* pointer arithmetic yields the hwif's index in the global array */
	ide_unregister(hwif - ide_hwifs);

	iounmap((void *)ahwif->regbase);

	/* NOTE(review): res is not checked for NULL, and the size
	 * expression matches the (one-byte-short) request made in
	 * au_ide_probe() -- keep the two in sync if either changes. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start);

	return 0;
}
/* Platform-bus driver binding: matched by name against the "au1200-ide"
 * platform device registered by board setup code. */
static struct device_driver au1200_ide_driver = {
	.name		= "au1200-ide",
	.bus		= &platform_bus_type,
	.probe		= au_ide_probe,
	.remove		= au_ide_remove,
};
  606. static int __init au_ide_init(void)
  607. {
  608. return driver_register(&au1200_ide_driver);
  609. }
  610. static void __exit au_ide_exit(void)
  611. {
  612. driver_unregister(&au1200_ide_driver);
  613. }
/* Standard module metadata and entry/exit registration. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);