/*
 * pata_amd.c - AMD PATA for new ATA layer
 * (C) 2005-2006 Red Hat Inc
 * Alan Cox <alan@redhat.com>
 *
 * Based on pata-sil680. Errata information is taken from data sheets
 * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
 * claimed by sata-nv.c.
 *
 * TODO:
 *	Variable system clock when/if it makes sense
 *	Power management on ports
 *
 * Documentation publicly available.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.3.10"
/**
 * timing_setup - shared timing computation and load
 * @ap: ATA port being set up
 * @adev: drive being configured
 * @offset: port offset
 * @speed: target speed
 * @clock: clock multiplier (number of times 33MHz for this part)
 *
 * Perform the actual timing set up for Nvidia or AMD PATA devices.
 * The actual devices vary so they all call into this helper function
 * providing the clock multiplier and offset (because AMD and Nvidia put
 * the ports at different locations).
 */
static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
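	/*
	 * amd_cyc2udma[] is indexed by the computed UDMA cycle count and
	 * yields the value that gets programmed into the UDMA timing
	 * register in the switch further down.
	 */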
	static const unsigned char amd_cyc2udma[] = {
		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
	};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *peer = ata_dev_pair(adev);
	int dn = ap->port_no * 2 + adev->devno;
	struct ata_timing at, apeer;
	int T, UT;
	const int amd_clock = 33333;	/* KHz. */
	u8 t;
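	/*
	 * T is one cycle of the 33 MHz base clock, in the units
	 * ata_timing_compute() expects; UT is the UDMA clock period,
	 * halved for parts driven at 66 MHz or better (clock >= 2, with
	 * the divisor clamped at 2; faster parts are handled through the
	 * encoding table instead).
	 */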
	T = 1000000000 / amd_clock;
	UT = T / min_t(int, max_t(int, clock, 1), 2);

	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
		return;
	}
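	/*
	 * The 8bit (command) timings are shared by both drives on a
	 * channel, so merge our timing with whatever the peer device is
	 * using before anything is programmed.
	 */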
	if (peer) {
		/* This may be over conservative */
		if (peer->dma_mode) {
			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
		}
		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
	}
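	/*
	 * UDMA100/133 cycles are shorter than one tick of the clock used
	 * above, so force sentinel cycle counts which the lookup below
	 * turns into the chip's two fastest UDMA encodings.
	 */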
	if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
	if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;

	/*
	 * Now do the setup work
	 */
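	/*
	 * Register layout, relative to @offset: 0x0C packs the address
	 * setup time for all four drives at two bits each, 0x0E/0x0F hold
	 * the 8bit command timing per channel, 0x08-0x0B the data
	 * active/recovery time per drive and 0x10-0x13 the UDMA timing
	 * per drive.
	 */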
	/* Configure the address set up timing */
	pci_read_config_byte(pdev, offset + 0x0C, &t);
	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
	pci_write_config_byte(pdev, offset + 0x0C, t);

	/* Configure the 8bit I/O timing */
	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));

	/* Drive timing */
	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));
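	/*
	 * The clock argument (1 = UDMA33, 2 = UDMA66, 3 = UDMA100 and
	 * 4 = UDMA133 class parts) selects how the UDMA cycle count is
	 * encoded; when UDMA is not in use, t falls back to 0x03 and the
	 * register write below is skipped.
	 */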
	switch (clock) {
	case 1:
		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
		break;
	case 2:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
		break;
	case 3:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
		break;
	case 4:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
		break;
	default:
		return;
	}

	/* UDMA timing */
	if (at.udma)
		pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}
/**
 * amd_pre_reset - perform reset handling
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Reset sequence checking enable bits to see which ports are
 * active.
 */
static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};
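	/*
	 * The table above tests the channel enable bits in config
	 * register 0x40: bit 1 for the primary channel (port 0), bit 0
	 * for the secondary (port 1).
	 */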
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}
static void amd_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}
static int amd_cable_detect(struct ata_port *ap)
{
	static const u32 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;

	pci_read_config_byte(pdev, 0x42, &ata66);
	if (ata66 & bitmask[ap->port_no])
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}
/**
 * amd33_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the AMD registers for PIO mode.
 */
static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}
/**
 * amd33_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the AMD and Nvidia
 * chipset.
 */
static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}
/**
 * nv_pre_reset - perform reset handling
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Reset sequence checking enable bits to see which ports are
 * active.
 */
static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};
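	/*
	 * Same scheme as the AMD parts, but the Nvidia bridges keep the
	 * channel enable bits in config register 0x50: bit 1 for the
	 * primary channel, bit 0 for the secondary.
	 */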
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, nv_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

static int nv_cable_detect(struct ata_port *ap)
{
	static const u8 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;
	u16 udma;
	int cbl;

	pci_read_config_byte(pdev, 0x52, &ata66);
	if (ata66 & bitmask[ap->port_no])
		cbl = ATA_CBL_PATA80;
	else
		cbl = ATA_CBL_PATA40;

	/* We now have to double check because the BIOS on Nvidia boxes
	   doesn't always set the cable bits but does set mode bits */
	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
		cbl = ATA_CBL_PATA80;

	/* And a triple check across suspend/resume with ACPI around */
	if (ata_acpi_cbl_80wire(ap))
		cbl = ATA_CBL_PATA80;
	return cbl;
}
/**
 * nv100_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the Nvidia registers for PIO mode.
 */
static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}
/**
 * nv100_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the AMD and Nvidia
 * chipset.
 */
static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}
static struct scsi_host_template amd_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};
static struct ata_port_operations amd33_port_ops = {
	.set_piomode = amd33_set_piomode,
	.set_dmamode = amd33_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,
	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_40wire,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.data_xfer = ata_data_xfer,
	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.port_start = ata_sff_port_start,
};

static struct ata_port_operations amd66_port_ops = {
	.set_piomode = amd66_set_piomode,
	.set_dmamode = amd66_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,
	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_unknown,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.data_xfer = ata_data_xfer,
	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.port_start = ata_sff_port_start,
};

static struct ata_port_operations amd100_port_ops = {
	.set_piomode = amd100_set_piomode,
	.set_dmamode = amd100_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,
	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_unknown,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.data_xfer = ata_data_xfer,
	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.port_start = ata_sff_port_start,
};

static struct ata_port_operations amd133_port_ops = {
	.set_piomode = amd133_set_piomode,
	.set_dmamode = amd133_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,
	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = amd_cable_detect,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.data_xfer = ata_data_xfer,
	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.port_start = ata_sff_port_start,
};

static struct ata_port_operations nv100_port_ops = {
	.set_piomode = nv100_set_piomode,
	.set_dmamode = nv100_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,
	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = nv_cable_detect,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.data_xfer = ata_data_xfer,
	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.port_start = ata_sff_port_start,
};

static struct ata_port_operations nv133_port_ops = {
	.set_piomode = nv133_set_piomode,
	.set_dmamode = nv133_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,
	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = nv_cable_detect,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.data_xfer = ata_data_xfer,
	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.port_start = ata_sff_port_start,
};
static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info[10] = {
		{ /* 0: AMD 7401 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,	/* No SWDMA */
			.udma_mask = 0x07,	/* UDMA 33 */
			.port_ops = &amd33_port_ops
		},
		{ /* 1: Early AMD7409 - no swdma */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 2: AMD 7409, no swdma errata */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 3: AMD 7411 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 4: AMD 7441 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 5: AMD 8111 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 6: AMD 8111 UDMA 100 (Serenade) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 7: Nvidia Nforce */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &nv100_port_ops
		},
		{ /* 8: Nvidia Nforce2 and later */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &nv133_port_ops
		},
		{ /* 9: AMD CS5536 (Geode companion) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		}
	};
	const struct ata_port_info *ppi[] = { NULL, NULL };
	static int printed_version;
	int type = id->driver_data;
	u8 fifo;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	pci_read_config_byte(pdev, 0x41, &fifo);
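	/*
	 * The top nibble of config register 0x41 holds the FIFO enable
	 * bits; the AMD7411 FIFO is broken (see below), so it is kept
	 * disabled there and switched on for every other supported chip.
	 */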
	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && pdev->revision > 0x7)
		type = 2;

	/* Check for AMD7411 */
	if (type == 3)
		/* FIFO is broken */
		pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
	else
		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);

	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
	    pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

	if (type < 3)
		ata_pci_clear_simplex(pdev);

	/* And fire it up */
	ppi[0] = &info[type];
	return ata_pci_init_one(pdev, ppi);
}
#ifdef CONFIG_PM
static int amd_reinit_one(struct pci_dev *pdev)
{
	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		u8 fifo;
		pci_read_config_byte(pdev, 0x41, &fifo);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
			/* FIFO is broken */
			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
		else
			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_clear_simplex(pdev);
	}
	return ata_pci_device_resume(pdev);
}
#endif
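/*
 * The driver_data value in each table entry below indexes the info[]
 * array in amd_init_one() and so selects the timing clock and port
 * operations used for that chip.
 */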
static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },

	{ },
};
static struct pci_driver amd_pci_driver = {
	.name = DRV_NAME,
	.id_table = amd,
	.probe = amd_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend = ata_pci_device_suspend,
	.resume = amd_reinit_one,
#endif
};
static int __init amd_init(void)
{
	return pci_register_driver(&amd_pci_driver);
}

static void __exit amd_exit(void)
{
	pci_unregister_driver(&amd_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);

module_init(amd_init);
module_exit(amd_exit);