/* au1xxx-ide.c */
  1. /*
  2. * linux/drivers/ide/mips/au1xxx-ide.c version 01.30.00 Aug. 02 2005
  3. *
  4. * BRIEF MODULE DESCRIPTION
  5. * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
  6. *
  7. * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
  8. *
  9. * This program is free software; you can redistribute it and/or modify it under
  10. * the terms of the GNU General Public License as published by the Free Software
  11. * Foundation; either version 2 of the License, or (at your option) any later
  12. * version.
  13. *
  14. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
  15. * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  16. * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
  17. * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  18. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  19. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  20. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  21. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  22. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  23. * POSSIBILITY OF SUCH DAMAGE.
  24. *
  25. * You should have received a copy of the GNU General Public License along with
  26. * this program; if not, write to the Free Software Foundation, Inc.,
  27. * 675 Mass Ave, Cambridge, MA 02139, USA.
  28. *
  29. * Note: for more information, please refer "AMD Alchemy Au1200/Au1550 IDE
  30. * Interface and Linux Device Driver" Application Note.
  31. */
  32. #include <linux/types.h>
  33. #include <linux/module.h>
  34. #include <linux/kernel.h>
  35. #include <linux/delay.h>
  36. #include <linux/platform_device.h>
  37. #include <linux/init.h>
  38. #include <linux/ide.h>
  39. #include <linux/sysdev.h>
  40. #include <linux/dma-mapping.h>
  41. #include "ide-timing.h"
  42. #include <asm/io.h>
  43. #include <asm/mach-au1x00/au1xxx.h>
  44. #include <asm/mach-au1x00/au1xxx_dbdma.h>
  45. #include <asm/mach-au1x00/au1xxx_ide.h>
#define DRV_NAME "au1200-ide"
#define DRV_VERSION "1.0"
#define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"

/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE 1

/* Single controller instance: this driver supports exactly one Au1xxx
 * on-chip IDE interface, so its state lives in one file-scope struct. */
static _auide_hwif auide_hwif;

/* Set once the DbDMA devices/channels/rings have been allocated by
 * auide_ddma_init(); initialisation is deferred (see auide_dma_check()). */
static int dbdma_init_done;
  53. #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
/*
 * PIO-over-DbDMA read: queue one RX descriptor covering the caller's
 * buffer, then busy-wait until the DbDMA engine clears the descriptor's
 * valid bit (transfer complete) before advancing the ring pointer.
 *
 * @port:  unused — data always arrives via the DbDMA RX channel
 * @addr:  destination buffer
 * @count: number of 16-bit words to read (hence the "count << 1" bytes)
 */
void auide_insw(unsigned long port, void *addr, u32 count)
{
	_auide_hwif *ahwif = &auide_hwif;
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	if (!put_dest_flags(ahwif->rx_chan, (void *)addr, count << 1,
			    DDMA_FLAGS_NOIE)) {
		printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
		return;
	}

	ctp = *((chan_tab_t **)ahwif->rx_chan);
	dp = ctp->cur_ptr;
	/* Poll until the hardware clears the valid bit on the current
	 * descriptor; NOIE above means no completion interrupt is raised. */
	while (dp->dscr_cmd0 & DSCR_CMD0_V)
		;
	ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}
/*
 * PIO-over-DbDMA write: mirror image of auide_insw() — queue one TX
 * descriptor for the caller's buffer and busy-wait for completion.
 *
 * @port:  unused — data always leaves via the DbDMA TX channel
 * @addr:  source buffer
 * @count: number of 16-bit words to write
 */
void auide_outsw(unsigned long port, void *addr, u32 count)
{
	_auide_hwif *ahwif = &auide_hwif;
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	if (!put_source_flags(ahwif->tx_chan, (void *)addr,
			      count << 1, DDMA_FLAGS_NOIE)) {
		printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
		return;
	}

	ctp = *((chan_tab_t **)ahwif->tx_chan);
	dp = ctp->cur_ptr;
	/* Spin until the DbDMA engine marks the descriptor consumed. */
	while (dp->dscr_cmd0 & DSCR_CMD0_V)
		;
	ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}
  86. #endif
/*
 * Program the Static Bus controller (MEM_STTIME2 / MEM_STCFG2, i.e. the
 * RCS2# chip select the IDE interface sits behind) for the requested PIO
 * mode, then switch the drive itself to the matching transfer mode.
 *
 * @drive: target drive
 * @pio:   PIO mode number, 0..4 (assumed bounded by the hwif pio_mask —
 *         TODO confirm; an out-of-range value would write 0 to MEM_STTIME2)
 */
static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
	int mem_sttime;
	int mem_stcfg;
	u8 speed;

	mem_sttime = 0;
	mem_stcfg = au_readl(MEM_STCFG2);

	/* set pio mode! */
	switch (pio) {
	case 0:
		mem_sttime = SBC_IDE_TIMING(PIO0);

		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
		break;
	case 1:
		mem_sttime = SBC_IDE_TIMING(PIO1);

		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
		break;
	case 2:
		mem_sttime = SBC_IDE_TIMING(PIO2);

		/* set configuration for RCS2#; note modes >= 2 clear TS_MASK
		 * where modes 0/1 set it */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
		break;
	case 3:
		mem_sttime = SBC_IDE_TIMING(PIO3);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
		break;
	case 4:
		mem_sttime = SBC_IDE_TIMING(PIO4);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
		break;
	}

	au_writel(mem_sttime, MEM_STTIME2);
	au_writel(mem_stcfg, MEM_STCFG2);

	speed = pio + XFER_PIO_0;
	ide_config_drive_speed(drive, speed);
}
/*
 * Speedproc hook: program the Static Bus timing for a Multi-Word DMA
 * mode.  Unlike au1xxx_set_pio_mode(), the drive is switched FIRST and
 * the bus registers are written only if that succeeded.
 *
 * @drive: target drive
 * @speed: XFER_MW_DMA_0..2 (anything else is rejected)
 *
 * Returns 0 on success, 1 on unsupported mode or drive-speed failure.
 */
static int auide_tune_chipset(ide_drive_t *drive, const u8 speed)
{
	int mem_sttime;
	int mem_stcfg;

	mem_sttime = 0;
	mem_stcfg = au_readl(MEM_STCFG2);

	switch (speed) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	case XFER_MW_DMA_2:
		mem_sttime = SBC_IDE_TIMING(MDMA2);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
		break;
	case XFER_MW_DMA_1:
		mem_sttime = SBC_IDE_TIMING(MDMA1);

		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
		break;
	case XFER_MW_DMA_0:
		mem_sttime = SBC_IDE_TIMING(MDMA0);

		/* set configuration for RCS2#; MDMA0 sets TS_MASK where
		 * MDMA1/2 clear it */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
		break;
#endif
	default:
		return 1;
	}

	if (ide_config_drive_speed(drive, speed))
		return 1;

	au_writel(mem_sttime, MEM_STTIME2);
	au_writel(mem_stcfg, MEM_STCFG2);

	return 0;
}
  184. /*
  185. * Multi-Word DMA + DbDMA functions
  186. */
  187. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
  188. static int auide_build_sglist(ide_drive_t *drive, struct request *rq)
  189. {
  190. ide_hwif_t *hwif = drive->hwif;
  191. _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
  192. struct scatterlist *sg = hwif->sg_table;
  193. ide_map_sg(drive, rq);
  194. if (rq_data_dir(rq) == READ)
  195. hwif->sg_dma_direction = DMA_FROM_DEVICE;
  196. else
  197. hwif->sg_dma_direction = DMA_TO_DEVICE;
  198. return dma_map_sg(ahwif->dev, sg, hwif->sg_nents,
  199. hwif->sg_dma_direction);
  200. }
/*
 * Fill the DbDMA descriptor ring for the current request.
 *
 * Walks the scatter/gather list produced by auide_build_sglist() and
 * queues one DbDMA descriptor per chunk of at most 0xfe00 bytes on the
 * TX (write) or RX (read) channel.  A completion interrupt is requested
 * only for descriptors belonging to the final s/g element.
 *
 * Returns 1 if at least one descriptor was queued; 0 (after unmapping)
 * to make the caller fall back to PIO.
 */
static int auide_build_dmatable(ide_drive_t *drive)
{
	int i, iswrite, count = 0;
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq = HWGROUP(drive)->rq;
	_auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
	struct scatterlist *sg;

	iswrite = (rq_data_dir(rq) == WRITE);
	/* Save for interrupt context */
	ahwif->drive = drive;

	/* Build sglist */
	hwif->sg_nents = i = auide_build_sglist(drive, rq);
	if (!i)
		return 0;

	/* fill the descriptors */
	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			u32 flags = DDMA_FLAGS_NOIE;
			unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

			if (++count >= PRD_ENTRIES) {
				printk(KERN_WARNING "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			}

			/* Lets enable intr for the last descriptor only */
			if (1 == i)
				flags = DDMA_FLAGS_IE;
			else
				flags = DDMA_FLAGS_NOIE;

			/* NOTE(review): descriptors are queued from the
			 * element's virtual address; cur_addr is advanced but
			 * never used for addressing, so every chunk of a
			 * multi-chunk element starts at page_address + offset
			 * — verify against the DbDMA put_*_flags() contract. */
			if (iswrite) {
				if (!put_source_flags(ahwif->tx_chan,
						      (void *)(page_address(sg->page)
							       + sg->offset),
						      tc, flags)) {
					printk(KERN_ERR "%s failed %d\n",
					       __FUNCTION__, __LINE__);
				}
			} else {
				if (!put_dest_flags(ahwif->rx_chan,
						    (void *)(page_address(sg->page)
							     + sg->offset),
						    tc, flags)) {
					printk(KERN_ERR "%s failed %d\n",
					       __FUNCTION__, __LINE__);
				}
			}

			cur_addr += tc;
			cur_len -= tc;
		}
		sg++;
		i--;
	}

	if (count)
		return 1;

use_pio_instead:
	dma_unmap_sg(ahwif->dev,
		     hwif->sg_table,
		     hwif->sg_nents,
		     hwif->sg_dma_direction);

	return 0; /* revert to PIO for this request */
}
  268. static int auide_dma_end(ide_drive_t *drive)
  269. {
  270. ide_hwif_t *hwif = HWIF(drive);
  271. _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
  272. if (hwif->sg_nents) {
  273. dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents,
  274. hwif->sg_dma_direction);
  275. hwif->sg_nents = 0;
  276. }
  277. return 0;
  278. }
/* DbDMA transfers begin as soon as descriptors are queued in
 * auide_build_dmatable(), so the core's ->dma_start hook is a no-op. */
static void auide_dma_start(ide_drive_t *drive)
{
}
/* Issue the taskfile @command to the drive with ide_dma_intr as the IRQ
 * handler, using a doubled command timeout (2*WAIT_CMD). */
static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr,
			    (2 * WAIT_CMD), NULL);
}
  288. static int auide_dma_setup(ide_drive_t *drive)
  289. {
  290. struct request *rq = HWGROUP(drive)->rq;
  291. if (!auide_build_dmatable(drive)) {
  292. ide_map_sg(drive, rq);
  293. return 1;
  294. }
  295. drive->waiting_for_dma = 1;
  296. return 0;
  297. }
/*
 * ->ide_dma_check hook: decide whether DMA may be used for @drive.
 *
 * The first invocation also performs the deferred DbDMA setup
 * (auide_ddma_init()), because the white/black-list classification of
 * the attached drive determines the DbDMA device geometry chosen there.
 *
 * Returns 0 when DMA should be enabled, -1 to fall back to PIO.
 */
static int auide_dma_check(ide_drive_t *drive)
{
	u8 speed = ide_max_dma_mode(drive);

	/* Lazily initialise the DbDMA channels on the first check. */
	if (dbdma_init_done == 0) {
		auide_hwif.white_list = ide_in_drive_list(drive->id,
							  dma_white_list);
		auide_hwif.black_list = ide_in_drive_list(drive->id,
							  dma_black_list);
		auide_hwif.drive = drive;
		auide_ddma_init(&auide_hwif);
		dbdma_init_done = 1;
	}

	/* Is the drive in our DMA black list? */
	if (auide_hwif.black_list) {
		drive->using_dma = 0;

		/* Borrowed the warning message from ide-dma.c */
		printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
		       drive->name, drive->id->model);
	} else
		drive->using_dma = 1;

	if (drive->autodma && (speed & XFER_MODE) != XFER_PIO)
		return 0;

	return -1;
}
  323. static int auide_dma_test_irq(ide_drive_t *drive)
  324. {
  325. if (drive->waiting_for_dma == 0)
  326. printk(KERN_WARNING "%s: ide_dma_test_irq \
  327. called while not waiting\n", drive->name);
  328. /* If dbdma didn't execute the STOP command yet, the
  329. * active bit is still set
  330. */
  331. drive->waiting_for_dma++;
  332. if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
  333. printk(KERN_WARNING "%s: timeout waiting for ddma to \
  334. complete\n", drive->name);
  335. return 1;
  336. }
  337. udelay(10);
  338. return 0;
  339. }
/* Nothing to do at the host level to enable DMA — no-op hook. */
static void auide_dma_host_on(ide_drive_t *drive)
{
}
/* ->ide_dma_on hook: (re-)enable DMA for the drive; always succeeds. */
static int auide_dma_on(ide_drive_t *drive)
{
	drive->using_dma = 1;

	return 0;
}
/* Nothing to do at the host level to disable DMA — no-op hook. */
static void auide_dma_host_off(ide_drive_t *drive)
{
}
/* Disable DMA for the drive without logging anything. */
static void auide_dma_off_quietly(ide_drive_t *drive)
{
	drive->using_dma = 0;
}
/* ->dma_lost_irq hook: just report the lost interrupt. */
static void auide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}
/* DbDMA TX-channel completion callback: clear the flag that
 * auide_dma_test_irq() polls.  @param is the _auide_hwif passed at
 * channel-allocation time. */
static void auide_ddma_tx_callback(int irq, void *param)
{
	_auide_hwif *ahwif = (_auide_hwif *)param;

	ahwif->drive->waiting_for_dma = 0;
}
/* DbDMA RX-channel completion callback: same contract as the TX
 * callback above. */
static void auide_ddma_rx_callback(int irq, void *param)
{
	_auide_hwif *ahwif = (_auide_hwif *)param;

	ahwif->drive->waiting_for_dma = 0;
}
  369. #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
/*
 * Populate a DbDMA device-table entry for the ATA controller.
 * All entries share the ATA physical address and interrupt
 * level/polarity 0; only id, transfer size, device width and flags
 * vary between callers.
 */
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags)
{
	dev->dev_id = dev_id;
	dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
	dev->dev_intlevel = 0;
	dev->dev_intpolarity = 0;
	dev->dev_tsize = tsize;
	dev->dev_devwidth = devwidth;
	dev->dev_flags = flags;
}
  380. #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
/*
 * ->dma_timeout hook: if ide_dma_test_irq() reports the transfer as
 * done/expired, there is nothing to clean up; otherwise tear down the
 * DMA mapping via ide_dma_end().
 */
static void auide_dma_timeout(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

	if (hwif->ide_dma_test_irq(drive))
		return;

	hwif->ide_dma_end(drive);
}
  389. static int auide_ddma_init(_auide_hwif *auide) {
  390. dbdev_tab_t source_dev_tab, target_dev_tab;
  391. u32 dev_id, tsize, devwidth, flags;
  392. ide_hwif_t *hwif = auide->hwif;
  393. dev_id = AU1XXX_ATA_DDMA_REQ;
  394. if (auide->white_list || auide->black_list) {
  395. tsize = 8;
  396. devwidth = 32;
  397. }
  398. else {
  399. tsize = 1;
  400. devwidth = 16;
  401. printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model);
  402. printk(KERN_ERR " please read 'Documentation/mips/AU1xxx_IDE.README'");
  403. }
  404. #ifdef IDE_AU1XXX_BURSTMODE
  405. flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
  406. #else
  407. flags = DEV_FLAGS_SYNC;
  408. #endif
  409. /* setup dev_tab for tx channel */
  410. auide_init_dbdma_dev( &source_dev_tab,
  411. dev_id,
  412. tsize, devwidth, DEV_FLAGS_OUT | flags);
  413. auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
  414. auide_init_dbdma_dev( &source_dev_tab,
  415. dev_id,
  416. tsize, devwidth, DEV_FLAGS_IN | flags);
  417. auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
  418. /* We also need to add a target device for the DMA */
  419. auide_init_dbdma_dev( &target_dev_tab,
  420. (u32)DSCR_CMD0_ALWAYS,
  421. tsize, devwidth, DEV_FLAGS_ANYUSE);
  422. auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);
  423. /* Get a channel for TX */
  424. auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
  425. auide->tx_dev_id,
  426. auide_ddma_tx_callback,
  427. (void*)auide);
  428. /* Get a channel for RX */
  429. auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
  430. auide->target_dev_id,
  431. auide_ddma_rx_callback,
  432. (void*)auide);
  433. auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
  434. NUM_DESCRIPTORS);
  435. auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
  436. NUM_DESCRIPTORS);
  437. hwif->dmatable_cpu = dma_alloc_coherent(auide->dev,
  438. PRD_ENTRIES * PRD_BYTES, /* 1 Page */
  439. &hwif->dmatable_dma, GFP_KERNEL);
  440. au1xxx_dbdma_start( auide->tx_chan );
  441. au1xxx_dbdma_start( auide->rx_chan );
  442. return 0;
  443. }
  444. #else
/*
 * DbDMA setup for PIO+DDMA offload mode: both directions use the
 * "always" pseudo device, fixed geometry (tsize 8, 32-bit), and NO
 * completion callbacks — completion is polled in auide_insw() /
 * auide_outsw().  Always returns 0.
 */
static int auide_ddma_init(_auide_hwif *auide)
{
	dbdev_tab_t source_dev_tab;
	int flags;

#ifdef IDE_AU1XXX_BURSTMODE
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

	/* setup dev_tab for tx channel */
	auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS,
			     8, 32, DEV_FLAGS_OUT | flags);
	auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS,
			     8, 32, DEV_FLAGS_IN | flags);
	auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
						 auide->tx_dev_id,
						 NULL,
						 (void *)auide);

	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 DSCR_CMD0_ALWAYS,
						 NULL,
						 (void *)auide);

	auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							      NUM_DESCRIPTORS);
	auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							      NUM_DESCRIPTORS);

	au1xxx_dbdma_start(auide->tx_chan);
	au1xxx_dbdma_start(auide->rx_chan);

	return 0;
}
  481. #endif
  482. static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
  483. {
  484. int i;
  485. unsigned long *ata_regs = hw->io_ports;
  486. /* FIXME? */
  487. for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
  488. *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
  489. }
  490. /* set the Alternative Status register */
  491. *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
  492. }
  493. static int au_ide_probe(struct device *dev)
  494. {
  495. struct platform_device *pdev = to_platform_device(dev);
  496. _auide_hwif *ahwif = &auide_hwif;
  497. ide_hwif_t *hwif;
  498. struct resource *res;
  499. hw_regs_t *hw;
  500. int ret = 0;
  501. #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
  502. char *mode = "MWDMA2";
  503. #elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
  504. char *mode = "PIO+DDMA(offload)";
  505. #endif
  506. memset(&auide_hwif, 0, sizeof(_auide_hwif));
  507. auide_hwif.dev = 0;
  508. ahwif->dev = dev;
  509. ahwif->irq = platform_get_irq(pdev, 0);
  510. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  511. if (res == NULL) {
  512. pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
  513. ret = -ENODEV;
  514. goto out;
  515. }
  516. if (ahwif->irq < 0) {
  517. pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
  518. ret = -ENODEV;
  519. goto out;
  520. }
  521. if (!request_mem_region (res->start, res->end-res->start, pdev->name)) {
  522. pr_debug("%s: request_mem_region failed\n", DRV_NAME);
  523. ret = -EBUSY;
  524. goto out;
  525. }
  526. ahwif->regbase = (u32)ioremap(res->start, res->end-res->start);
  527. if (ahwif->regbase == 0) {
  528. ret = -ENOMEM;
  529. goto out;
  530. }
  531. /* FIXME: This might possibly break PCMCIA IDE devices */
  532. hwif = &ide_hwifs[pdev->id];
  533. hw = &hwif->hw;
  534. hwif->irq = hw->irq = ahwif->irq;
  535. hwif->chipset = ide_au1xxx;
  536. auide_setup_ports(hw, ahwif);
  537. memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
  538. hwif->ultra_mask = 0x0; /* Disable Ultra DMA */
  539. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
  540. hwif->mwdma_mask = 0x07; /* Multimode-2 DMA */
  541. hwif->swdma_mask = 0x00;
  542. #else
  543. hwif->mwdma_mask = 0x0;
  544. hwif->swdma_mask = 0x0;
  545. #endif
  546. hwif->pio_mask = ATA_PIO4;
  547. hwif->noprobe = 0;
  548. hwif->drives[0].unmask = 1;
  549. hwif->drives[1].unmask = 1;
  550. /* hold should be on in all cases */
  551. hwif->hold = 1;
  552. hwif->mmio = 1;
  553. /* If the user has selected DDMA assisted copies,
  554. then set up a few local I/O function entry points
  555. */
  556. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
  557. hwif->INSW = auide_insw;
  558. hwif->OUTSW = auide_outsw;
  559. #endif
  560. hwif->set_pio_mode = &au1xxx_set_pio_mode;
  561. hwif->speedproc = &auide_tune_chipset;
  562. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
  563. hwif->dma_off_quietly = &auide_dma_off_quietly;
  564. hwif->dma_timeout = &auide_dma_timeout;
  565. hwif->ide_dma_check = &auide_dma_check;
  566. hwif->dma_exec_cmd = &auide_dma_exec_cmd;
  567. hwif->dma_start = &auide_dma_start;
  568. hwif->ide_dma_end = &auide_dma_end;
  569. hwif->dma_setup = &auide_dma_setup;
  570. hwif->ide_dma_test_irq = &auide_dma_test_irq;
  571. hwif->dma_host_off = &auide_dma_host_off;
  572. hwif->dma_host_on = &auide_dma_host_on;
  573. hwif->dma_lost_irq = &auide_dma_lost_irq;
  574. hwif->ide_dma_on = &auide_dma_on;
  575. hwif->autodma = 1;
  576. hwif->drives[0].autodma = hwif->autodma;
  577. hwif->drives[1].autodma = hwif->autodma;
  578. hwif->atapi_dma = 1;
  579. #else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
  580. hwif->autodma = 0;
  581. hwif->channel = 0;
  582. hwif->hold = 1;
  583. hwif->select_data = 0; /* no chipset-specific code */
  584. hwif->config_data = 0; /* no chipset-specific code */
  585. hwif->drives[0].autodma = 0;
  586. hwif->drives[0].autotune = 1; /* 1=autotune, 2=noautotune, 0=default */
  587. #endif
  588. hwif->drives[0].no_io_32bit = 1;
  589. auide_hwif.hwif = hwif;
  590. hwif->hwif_data = &auide_hwif;
  591. #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
  592. auide_ddma_init(&auide_hwif);
  593. dbdma_init_done = 1;
  594. #endif
  595. probe_hwif_init(hwif);
  596. ide_proc_register_port(hwif);
  597. dev_set_drvdata(dev, hwif);
  598. printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
  599. out:
  600. return ret;
  601. }
  602. static int au_ide_remove(struct device *dev)
  603. {
  604. struct platform_device *pdev = to_platform_device(dev);
  605. struct resource *res;
  606. ide_hwif_t *hwif = dev_get_drvdata(dev);
  607. _auide_hwif *ahwif = &auide_hwif;
  608. ide_unregister(hwif - ide_hwifs);
  609. iounmap((void *)ahwif->regbase);
  610. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  611. release_mem_region(res->start, res->end - res->start);
  612. return 0;
  613. }
/* Platform-bus glue for the single on-chip controller instance. */
static struct device_driver au1200_ide_driver = {
	.name = "au1200-ide",
	.bus = &platform_bus_type,
	.probe = au_ide_probe,
	.remove = au_ide_remove,
};

/* Module entry: register the platform driver. */
static int __init au_ide_init(void)
{
	return driver_register(&au1200_ide_driver);
}

/* Module exit: unregister the platform driver. */
static void __exit au_ide_exit(void)
{
	driver_unregister(&au1200_ide_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);