/*
 * pata_amd.c	- AMD PATA for new ATA layer
 *		  (C) 2005-2006 Red Hat Inc
 *		  Alan Cox <alan@redhat.com>
 *
 * Based on pata-sil680. Errata information is taken from data sheets
 * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
 * claimed by sata-nv.c.
 *
 * TODO:
 *	Variable system clock when/if it makes sense
 *	Power management on ports
 *
 * Documentation publicly available.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.3.9"

/**
 *	timing_setup	-	shared timing computation and load
 *	@ap: ATA port being set up
 *	@adev: drive being configured
 *	@offset: port offset
 *	@speed: target speed
 *	@clock: clock multiplier (number of times 33MHz for this part)
 *
 *	Perform the actual timing set up for Nvidia or AMD PATA devices.
 *	The actual devices vary so they all call into this helper function
 *	providing the clock multiplier and offset (because AMD and Nvidia put
 *	the ports at different locations).
 */
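
/*
 * Rough illustration of the arithmetic below: with amd_clock = 33333 KHz,
 * T = 1000000000 / 33333 ~= 30000, i.e. the 30ns period of the 33MHz
 * command clock in the units ata_timing_compute() expects. UT, the UDMA
 * clock period, equals T for a clock multiplier of 1 (UDMA33 parts) and
 * T/2 for multipliers of 2 and above; the UDMA cycle count computed in
 * UT units is then mapped to the chip's register encoding through the
 * amd_cyc2udma[] table for the faster parts.
 */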

static void timing_setup(struct ata_port *ap, struct ata_device *adev,
			 int offset, int speed, int clock)
{
	static const unsigned char amd_cyc2udma[] = {
		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *peer = ata_dev_pair(adev);
	int dn = ap->port_no * 2 + adev->devno;
	struct ata_timing at, apeer;
	int T, UT;
	const int amd_clock = 33333;	/* KHz. */
	u8 t;

	T = 1000000000 / amd_clock;
	UT = T / min_t(int, max_t(int, clock, 1), 2);

	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
		return;
	}

	if (peer) {
		/* This may be over conservative */
		if (peer->dma_mode) {
			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
		}
		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
	}

	if (speed == XFER_UDMA_5 && amd_clock <= 33333)
		at.udma = 1;
	if (speed == XFER_UDMA_6 && amd_clock <= 33333)
		at.udma = 15;

	/*
	 *	Now do the setup work
	 */

	/* Configure the address set up timing */
	pci_read_config_byte(pdev, offset + 0x0C, &t);
	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
	pci_write_config_byte(pdev, offset + 0x0C, t);

	/* Configure the 8bit I/O timing */
	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));

	/* Drive timing */
	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));

	switch (clock) {
	case 1:
		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
		break;
	case 2:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
		break;
	case 3:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
		break;
	case 4:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
		break;
	default:
		return;
	}

	/* UDMA timing */
	pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}

/**
 *	amd_pre_reset	-	perform reset handling
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset sequence checking enable bits to see which ports are
 *	active.
 */
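
/*
 * Each pci_bits entry below is interpreted by pci_test_config_bits() as,
 * roughly, "read this many bytes at this config offset, mask them and
 * compare with the expected value": the entry used for port 0,
 * { 0x40, 1, 0x02, 0x02 }, checks that bit 0x02 of register 0x40 is set,
 * i.e. that the primary channel is enabled.
 */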

static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void amd_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

static int amd_cable_detect(struct ata_port *ap)
{
	static const u32 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;

	pci_read_config_byte(pdev, 0x42, &ata66);
	if (ata66 & bitmask[ap->port_no])
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}
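
/*
 * The amd33/66/100/133 set_piomode and set_dmamode methods below differ
 * only in the clock multiplier they pass to timing_setup() (1, 2, 3 and
 * 4 respectively, i.e. the number of times 33MHz for that part, as
 * described above); the AMD register block always sits at offset 0x40.
 */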

/**
 *	amd33_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the AMD registers for PIO mode.
 */

static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}

/**
 *	amd33_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the AMD and Nvidia
 *	chipset.
 */

static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}
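
/*
 * The Nvidia bridges use the same programming model but place the enable
 * bits and timing registers at offset 0x50 rather than 0x40, hence the
 * offset argument to timing_setup().
 */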

/**
 *	nv_pre_reset	-	perform reset handling
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset sequence checking enable bits to see which ports are
 *	active.
 */

static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, nv_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

/**
 *	nv_cable_detect	-	cable detection
 *	@ap: ATA port
 *
 *	Perform cable detection. The BIOS stores this in PCI config
 *	space for us.
 */

static int nv_cable_detect(struct ata_port *ap)
{
	static const u8 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;
	u16 udma;
	int cbl;

	pci_read_config_byte(pdev, 0x52, &ata66);
	if (ata66 & bitmask[ap->port_no])
		cbl = ATA_CBL_PATA80;
	else
		cbl = ATA_CBL_PATA40;

	/* We now have to double check because the BIOS on Nvidia boards
	   doesn't always set the cable bits but does set the mode bits */
	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
		cbl = ATA_CBL_PATA80;

	/* And a triple check across suspend/resume with ACPI around */
	if (ata_acpi_cbl_80wire(ap))
		cbl = ATA_CBL_PATA80;

	return cbl;
}

/**
 *	nv100_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the Nvidia registers for PIO mode.
 */

static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}

/**
 *	nv100_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the Nvidia chipsets.
 */

static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}

static struct scsi_host_template amd_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};

static struct ata_port_operations amd33_port_ops = {
	.set_piomode = amd33_set_piomode,
	.set_dmamode = amd33_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_40wire,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations amd66_port_ops = {
	.set_piomode = amd66_set_piomode,
	.set_dmamode = amd66_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_unknown,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations amd100_port_ops = {
	.set_piomode = amd100_set_piomode,
	.set_dmamode = amd100_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_unknown,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations amd133_port_ops = {
	.set_piomode = amd133_set_piomode,
	.set_dmamode = amd133_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = amd_cable_detect,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations nv100_port_ops = {
	.set_piomode = nv100_set_piomode,
	.set_dmamode = nv100_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = nv_cable_detect,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations nv133_port_ops = {
	.set_piomode = nv133_set_piomode,
	.set_dmamode = nv133_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = nv_cable_detect,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info[10] = {
		{ /* 0: AMD 7401 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,	/* No SWDMA */
			.udma_mask = 0x07,	/* UDMA 33 */
			.port_ops = &amd33_port_ops
		},
		{ /* 1: Early AMD7409 - no swdma */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 2: AMD 7409, no swdma errata */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 3: AMD 7411 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 4: AMD 7441 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 5: AMD 8111 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 6: AMD 8111 UDMA 100 (Serenade) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 7: Nvidia Nforce */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &nv100_port_ops
		},
		{ /* 8: Nvidia Nforce2 and later */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &nv133_port_ops
		},
		{ /* 9: AMD CS5536 (Geode companion) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		}
	};
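
	/*
	 * type indexes the info[] table above and comes from the driver_data
	 * field of the amd[] PCI id table below; entries 2 (7409 without the
	 * swdma errata) and 6 (Serenade) are only selected by the fixups
	 * further down in this function.
	 */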
	const struct ata_port_info *ppi[] = { NULL, NULL };
	static int printed_version;
	int type = id->driver_data;
	u8 fifo;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	pci_read_config_byte(pdev, 0x41, &fifo);

	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && pdev->revision > 0x7)
		type = 2;

	/* Check for AMD7411 */
	if (type == 3)
		/* FIFO is broken */
		pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
	else
		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);

	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
	    pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

	if (type < 3)
		ata_pci_clear_simplex(pdev);

	/* And fire it up */
	ppi[0] = &info[type];

	return ata_pci_init_one(pdev, ppi);
}

#ifdef CONFIG_PM
static int amd_reinit_one(struct pci_dev *pdev)
{
	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		u8 fifo;
		pci_read_config_byte(pdev, 0x41, &fifo);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
			/* FIFO is broken */
			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
		else
			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_clear_simplex(pdev);
	}
	return ata_pci_device_resume(pdev);
}
#endif

static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },

	{ },
};

static struct pci_driver amd_pci_driver = {
	.name = DRV_NAME,
	.id_table = amd,
	.probe = amd_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend = ata_pci_device_suspend,
	.resume = amd_reinit_one,
#endif
};

static int __init amd_init(void)
{
	return pci_register_driver(&amd_pci_driver);
}

static void __exit amd_exit(void)
{
	pci_unregister_driver(&amd_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);

module_init(amd_init);
module_exit(amd_exit);