/*
 * linux/drivers/ide/pci/serverworks.c		Version 0.9	Mar 4 2007
 *
 * Copyright (C) 1998-2000 Michel Aubry
 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
 * Portions copyright (c) 2001 Sun Microsystems
 *
 *
 * RCC/ServerWorks IDE driver for Linux
 *
 *   OSB4: `Open South Bridge' IDE Interface (fn 1)
 *         supports UDMA mode 2 (33 MB/s)
 *
 *   CSB5: `Champion South Bridge' IDE Interface (fn 1)
 *         all revisions support UDMA mode 4 (66 MB/s)
 *         revision A2.0 and up support UDMA mode 5 (100 MB/s)
 *
 *         *** The CSB5 does not provide ANY register ***
 *         *** to detect 80-conductor cable presence. ***
 *
 *   CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
 *
 *   HT1000: AKA BCM5785 - Hypertransport Southbridge for Opteron systems.
 *           IDE controller same as the CSB6. Single channel ATA100 only.
 *
 * Documentation:
 *	Available under NDA only. Errata info very hard to get.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/io.h>

#define SVWKS_CSB5_REVISION_NEW	0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION		0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */

/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
 * can overrun their FIFOs when used with the CSB5 */
static const char *svwks_bad_ata100[] = {
	"ST320011A",
	"ST340016A",
	"ST360021A",
	"ST380021A",
	NULL
};

static u8 svwks_revision = 0;
static struct pci_dev *isa_dev;
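
/*
 * Match the drive's reported model string against a NULL-terminated
 * blacklist (used for the Barracuda ATA IV list above).
 */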
static int check_in_drive_lists (ide_drive_t *drive, const char **list)
{
	while (*list)
		if (!strcmp(*list++, drive->id->model))
			return 1;
	return 0;
}
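
/*
 * Return a mask of the UDMA modes the chipset will allow for this drive:
 * 0x07 = UDMA0-2 (33 MB/s), 0x1f = UDMA0-4 (66 MB/s), 0x3f = UDMA0-5.
 * The OSB4 only gets UDMA when the DMA33 enable bit in the southbridge
 * is set, and never for disks; on new-revision CSB5 parts the mask
 * depends on the mode bits in register 0x5A and the Barracuda blacklist.
 */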
static u8 svwks_udma_filter(ide_drive_t *drive)
{
	struct pci_dev *dev = HWIF(drive)->pci_dev;
	u8 mask = 0;

	if (!svwks_revision)
		pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);

	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
		return 0x1f;
	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
		u32 reg = 0;
		if (isa_dev)
			pci_read_config_dword(isa_dev, 0x64, &reg);

		/*
		 * Don't enable UDMA on disk devices for the moment
		 */
		if (drive->media == ide_disk)
			return 0;
		/* Check the OSB4 DMA33 enable bit */
		return ((reg & 0x00004000) == 0x00004000) ? 0x07 : 0;
	} else if (svwks_revision < SVWKS_CSB5_REVISION_NEW) {
		return 0x07;
	} else if (svwks_revision >= SVWKS_CSB5_REVISION_NEW) {
		u8 btr = 0, mode;
		pci_read_config_byte(dev, 0x5A, &btr);
		mode = btr & 0x3;

		/* If someone decides to do UDMA133 on CSB5 the same
		   issue will bite so be inclusive */
		if (mode > 2 && check_in_drive_lists(drive, svwks_bad_ata100))
			mode = 2;

		switch(mode) {
		case 3:		mask = 0x3f; break;
		case 2:		mask = 0x1f; break;
		case 1:		mask = 0x07; break;
		default:	mask = 0x00; break;
		}
	}

	if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
	     (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) &&
	    (!(PCI_FUNC(dev->devfn) & 1)))
		mask = 0x1f;

	return mask;
}
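
/* Nonzero for the CSB5/CSB6/HT1000 IDE functions, zero for the OSB4 */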
static u8 svwks_csb_check (struct pci_dev *dev)
{
	switch (dev->device) {
		case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
		case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
			return 1;
		default:
			break;
	}
	return 0;
}
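
/*
 * Program the chipset for the requested transfer rate: per-drive PIO and
 * MWDMA timings live at 0x40-0x47, the CSB5 PIO mode nibbles at 0x4A,
 * UDMA timing nibbles at 0x56/0x57 and the per-drive UDMA enable bits at
 * 0x54.  On CSB6 the BIOS-programmed values are read back once to seed
 * drive->init_speed before anything is rewritten.
 */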
static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
	static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
	static const u8 dma_modes[]  = { 0x77, 0x21, 0x20 };
	static const u8 pio_modes[]  = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
	static const u8 drive_pci[]  = { 0x41, 0x40, 0x43, 0x42 };
	static const u8 drive_pci2[] = { 0x45, 0x44, 0x47, 0x46 };

	ide_hwif_t *hwif = HWIF(drive);
	struct pci_dev *dev = hwif->pci_dev;
	u8 speed = ide_rate_filter(drive, xferspeed);
	u8 pio = ide_get_best_pio_mode(drive, 255, 4, NULL);
	u8 unit = (drive->select.b.unit & 0x01);
	u8 csb5 = svwks_csb_check(dev);
	u8 ultra_enable = 0, ultra_timing = 0;
	u8 dma_timing = 0, pio_timing = 0;
	u16 csb5_pio = 0;

	/* If we are about to put a disk into UDMA mode we screwed up.
	   Our code assumes we never _ever_ do this on an OSB4 */
	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE &&
	    drive->media == ide_disk && speed >= XFER_UDMA_0)
		BUG();

	pci_read_config_byte(dev, drive_pci[drive->dn], &pio_timing);
	pci_read_config_byte(dev, drive_pci2[drive->dn], &dma_timing);
	pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
	pci_read_config_word(dev, 0x4A, &csb5_pio);
	pci_read_config_byte(dev, 0x54, &ultra_enable);

	/* Per Specified Design by OEM, and ASIC Architect */
	if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
		if (!drive->init_speed) {
			u8 dma_stat = inb(hwif->dma_status);

dma_pio:
			if ((((ultra_enable << (7 - drive->dn)) & 0x80) == 0x80) &&
			    ((dma_stat & (1 << (5 + unit))) == (1 << (5 + unit)))) {
				drive->current_speed = drive->init_speed = XFER_UDMA_0 +
					udma_modes[(ultra_timing >> (4*unit)) & ~(0xF0)];
				return 0;
			} else if ((dma_timing) &&
				   ((dma_stat & (1 << (5 + unit))) == (1 << (5 + unit)))) {
				u8 dmaspeed = dma_timing;

				dma_timing &= ~0xFF;
				if ((dmaspeed & 0x20) == 0x20)
					dmaspeed = XFER_MW_DMA_2;
				else if ((dmaspeed & 0x21) == 0x21)
					dmaspeed = XFER_MW_DMA_1;
				else if ((dmaspeed & 0x77) == 0x77)
					dmaspeed = XFER_MW_DMA_0;
				else
					goto dma_pio;

				drive->current_speed = drive->init_speed = dmaspeed;
				return 0;
			} else if (pio_timing) {
				u8 piospeed = pio_timing;

				pio_timing &= ~0xFF;
				if ((piospeed & 0x20) == 0x20)
					piospeed = XFER_PIO_4;
				else if ((piospeed & 0x22) == 0x22)
					piospeed = XFER_PIO_3;
				else if ((piospeed & 0x34) == 0x34)
					piospeed = XFER_PIO_2;
				else if ((piospeed & 0x47) == 0x47)
					piospeed = XFER_PIO_1;
				else if ((piospeed & 0x5d) == 0x5d)
					piospeed = XFER_PIO_0;
				else
					goto oem_setup_failed;

				drive->current_speed = drive->init_speed = piospeed;
				return 0;
			}
		}
	}

oem_setup_failed:
	pio_timing   &= ~0xFF;
	dma_timing   &= ~0xFF;
	ultra_timing &= ~(0x0F << (4*unit));
	ultra_enable &= ~(0x01 << drive->dn);
	csb5_pio     &= ~(0x0F << (4*drive->dn));

	switch(speed) {
		case XFER_PIO_4:
		case XFER_PIO_3:
		case XFER_PIO_2:
		case XFER_PIO_1:
		case XFER_PIO_0:
			pio_timing |= pio_modes[speed - XFER_PIO_0];
			csb5_pio   |= ((speed - XFER_PIO_0) << (4*drive->dn));
			break;

		case XFER_MW_DMA_2:
		case XFER_MW_DMA_1:
		case XFER_MW_DMA_0:
			/*
			 * TODO: always setup PIO mode so this won't be needed
			 */
			pio_timing |= pio_modes[pio];
			csb5_pio   |= (pio << (4*drive->dn));
			dma_timing |= dma_modes[speed - XFER_MW_DMA_0];
			break;

		case XFER_UDMA_5:
		case XFER_UDMA_4:
		case XFER_UDMA_3:
		case XFER_UDMA_2:
		case XFER_UDMA_1:
		case XFER_UDMA_0:
			/*
			 * TODO: always setup PIO mode so this won't be needed
			 */
			pio_timing   |= pio_modes[pio];
			csb5_pio     |= (pio << (4*drive->dn));
			dma_timing   |= dma_modes[2];
			ultra_timing |= ((udma_modes[speed - XFER_UDMA_0]) << (4*unit));
			ultra_enable |= (0x01 << drive->dn);
		default:
			break;
	}

	pci_write_config_byte(dev, drive_pci[drive->dn], pio_timing);
	if (csb5)
		pci_write_config_word(dev, 0x4A, csb5_pio);
	pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing);
	pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing);
	pci_write_config_byte(dev, 0x54, ultra_enable);

	return (ide_config_drive_speed(drive, speed));
}

static void svwks_tune_drive (ide_drive_t *drive, u8 pio)
{
	pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
	(void) svwks_tune_chipset(drive, XFER_PIO_0 + pio);
}
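
/*
 * ide_dma_check hook: try DMA first and fall back to the best PIO mode;
 * returns 0 when DMA was configured, -1 otherwise.
 */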
static int svwks_config_drive_xfer_rate (ide_drive_t *drive)
{
	drive->init_speed = 0;

	if (ide_tune_dma(drive))
		return 0;

	if (ide_use_fast_pio(drive))
		svwks_tune_drive(drive, 255);

	return -1;
}
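
/*
 * One-time chipset bring-up: latch the revision ID, enable UDMA/33 on the
 * OSB4 southbridge, sort out CSB5/CSB6 third-channel IRQ routing, and set
 * the UDMA Control register (0x5A) according to the revision.
 */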
static unsigned int __devinit init_chipset_svwks (struct pci_dev *dev, const char *name)
{
	unsigned int reg;
	u8 btr;

	/* save revision id to determine DMA capability */
	pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);

	/* force Master Latency Timer value to 64 PCICLKs */
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x40);

	/* OSB4 : South Bridge and IDE */
	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
		isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
					 PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
		if (isa_dev) {
			pci_read_config_dword(isa_dev, 0x64, &reg);
			reg &= ~0x00002000; /* disable 600ns interrupt mask */
			if (!(reg & 0x00004000))
				printk(KERN_DEBUG "%s: UDMA not BIOS enabled.\n", name);
			reg |=  0x00004000; /* enable UDMA/33 support */
			pci_write_config_dword(isa_dev, 0x64, reg);
		}
	}

	/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
	else if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
		 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
		 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {

		/* Third Channel Test */
		if (!(PCI_FUNC(dev->devfn) & 1)) {
			struct pci_dev *findev = NULL;
			u32 reg4c = 0;
			findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
			if (findev) {
				pci_read_config_dword(findev, 0x4C, &reg4c);
				reg4c &= ~0x000007FF;
				reg4c |=  0x00000040;
				reg4c |=  0x00000020;
				pci_write_config_dword(findev, 0x4C, reg4c);
				pci_dev_put(findev);
			}
			outb_p(0x06, 0x0c00);
			dev->irq = inb_p(0x0c01);
		} else {
			struct pci_dev *findev = NULL;
			u8 reg41 = 0;
			findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
			if (findev) {
				pci_read_config_byte(findev, 0x41, &reg41);
				reg41 &= ~0x40;
				pci_write_config_byte(findev, 0x41, reg41);
				pci_dev_put(findev);
			}
			/*
			 * This is a device pin issue on CSB6.
			 * Since there will be a future raid mode,
			 * early versions of the chipset require the
			 * interrupt pin to be set, and it is a compatibility
			 * mode issue.
			 */
			if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
				dev->irq = 0;
		}
//		pci_read_config_dword(dev, 0x40, &pioreg)
//		pci_write_config_dword(dev, 0x40, 0x99999999);
//		pci_read_config_dword(dev, 0x44, &dmareg);
//		pci_write_config_dword(dev, 0x44, 0xFFFFFFFF);
		/* setup the UDMA Control register
		 *
		 * 1. clear bit 6 to enable DMA
		 * 2. enable DMA modes with bits 0-1
		 *	00 : legacy
		 *	01 : udma2
		 *	10 : udma2/udma4
		 *	11 : udma2/udma4/udma5
		 */
		pci_read_config_byte(dev, 0x5A, &btr);
		btr &= ~0x40;
		if (!(PCI_FUNC(dev->devfn) & 1))
			btr |= 0x2;
		else
			btr |= (svwks_revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
		pci_write_config_byte(dev, 0x5A, btr);
	}
	/* Setup HT1000 SouthBridge Controller - Single Channel Only */
	else if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
		pci_read_config_byte(dev, 0x5A, &btr);
		btr &= ~0x40;
		btr |= 0x3;
		pci_write_config_byte(dev, 0x5A, btr);
	}

	return dev->irq;
}
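
/*
 * ServerWorks' own boards provide no way to sense the cable (see the
 * CSB5 note in the header), so assume an 80-conductor cable is fitted.
 */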
static unsigned int __devinit ata66_svwks_svwks (ide_hwif_t *hwif)
{
	return 1;
}

/* On Dell PowerEdge servers with a CSB5/CSB6, the top two bits
 * of the subsystem device ID indicate presence of an 80-pin cable.
 * Bit 15 clear = secondary IDE channel does not have 80-pin cable.
 * Bit 15 set   = secondary IDE channel has 80-pin cable.
 * Bit 14 clear = primary IDE channel does not have 80-pin cable.
 * Bit 14 set   = primary IDE channel has 80-pin cable.
 */
static unsigned int __devinit ata66_svwks_dell (ide_hwif_t *hwif)
{
	struct pci_dev *dev = hwif->pci_dev;

	if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE ||
	     dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE))
		return ((1 << (hwif->channel + 14)) &
			dev->subsystem_device) ? 1 : 0;
	return 0;
}

/* Sun Cobalt Alpine hardware avoids the 80-pin cable
 * detect issue by attaching the drives directly to the board.
 * This check follows the Dell precedent (how scary is that?!)
 *
 * WARNING: this only works on Alpine hardware!
 */
static unsigned int __devinit ata66_svwks_cobalt (ide_hwif_t *hwif)
{
	struct pci_dev *dev = hwif->pci_dev;

	if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN &&
	    dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
	    dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
		return ((1 << (hwif->channel + 14)) &
			dev->subsystem_device) ? 1 : 0;
	return 0;
}

static unsigned int __devinit ata66_svwks (ide_hwif_t *hwif)
{
	struct pci_dev *dev = hwif->pci_dev;

	/* Server Works */
	if (dev->subsystem_vendor == PCI_VENDOR_ID_SERVERWORKS)
		return ata66_svwks_svwks (hwif);

	/* Dell PowerEdge */
	if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL)
		return ata66_svwks_dell (hwif);

	/* Cobalt Alpine */
	if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN)
		return ata66_svwks_cobalt (hwif);

	/* Per Specified Design by OEM, and ASIC Architect */
	if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2))
		return 1;

	return 0;
}
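
/*
 * Wire up the tuning, speed-setting and UDMA-filter methods.  ultra_mask
 * is left clear on the OSB4; when a DMA engine is present, the per-drive
 * autodma/autotune defaults come from bits 5 and 6 of the BMIDE status
 * register.
 */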
static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
{
	u8 dma_stat = 0;

	if (!hwif->irq)
		hwif->irq = hwif->channel ? 15 : 14;

	hwif->tuneproc = &svwks_tune_drive;
	hwif->speedproc = &svwks_tune_chipset;
	hwif->udma_filter = &svwks_udma_filter;

	hwif->atapi_dma = 1;

	if (hwif->pci_dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE)
		hwif->ultra_mask = 0x3f;

	hwif->mwdma_mask = 0x07;

	hwif->autodma = 0;

	if (!hwif->dma_base) {
		hwif->drives[0].autotune = 1;
		hwif->drives[1].autotune = 1;
		return;
	}

	hwif->ide_dma_check = &svwks_config_drive_xfer_rate;
	if (hwif->pci_dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
		if (!hwif->udma_four)
			hwif->udma_four = ata66_svwks(hwif);
	}
	if (!noautodma)
		hwif->autodma = 1;

	dma_stat = inb(hwif->dma_status);
	hwif->drives[0].autodma = (dma_stat & 0x20);
	hwif->drives[1].autodma = (dma_stat & 0x40);
	hwif->drives[0].autotune = (!(dma_stat & 0x20));
	hwif->drives[1].autotune = (!(dma_stat & 0x40));
}

static int __devinit init_setup_svwks (struct pci_dev *dev, ide_pci_device_t *d)
{
	return ide_setup_pci_device(dev, d);
}
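
/*
 * The even-numbered PCI function carries the CSB6's optional third
 * channel: register it as a single-channel interface and only mark it
 * bootable when its first BAR is at the legacy address checked below.
 */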
static int __devinit init_setup_csb6 (struct pci_dev *dev, ide_pci_device_t *d)
{
	if (!(PCI_FUNC(dev->devfn) & 1)) {
		d->bootable = NEVER_BOARD;
		if (dev->resource[0].start == 0x01f1)
			d->bootable = ON_BOARD;
	}

	d->channels = ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE ||
			dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2) &&
		       (!(PCI_FUNC(dev->devfn) & 1))) ? 1 : 2;

	return ide_setup_pci_device(dev, d);
}

static ide_pci_device_t serverworks_chipsets[] __devinitdata = {
	{	/* 0 */
		.name		= "SvrWks OSB4",
		.init_setup	= init_setup_svwks,
		.init_chipset	= init_chipset_svwks,
		.init_hwif	= init_hwif_svwks,
		.channels	= 2,
		.autodma	= AUTODMA,
		.bootable	= ON_BOARD,
	},{	/* 1 */
		.name		= "SvrWks CSB5",
		.init_setup	= init_setup_svwks,
		.init_chipset	= init_chipset_svwks,
		.init_hwif	= init_hwif_svwks,
		.channels	= 2,
		.autodma	= AUTODMA,
		.bootable	= ON_BOARD,
	},{	/* 2 */
		.name		= "SvrWks CSB6",
		.init_setup	= init_setup_csb6,
		.init_chipset	= init_chipset_svwks,
		.init_hwif	= init_hwif_svwks,
		.channels	= 2,
		.autodma	= AUTODMA,
		.bootable	= ON_BOARD,
	},{	/* 3 */
		.name		= "SvrWks CSB6",
		.init_setup	= init_setup_csb6,
		.init_chipset	= init_chipset_svwks,
		.init_hwif	= init_hwif_svwks,
		.channels	= 1,	/* 2 */
		.autodma	= AUTODMA,
		.bootable	= ON_BOARD,
	},{	/* 4 */
		.name		= "SvrWks HT1000",
		.init_setup	= init_setup_svwks,
		.init_chipset	= init_chipset_svwks,
		.init_hwif	= init_hwif_svwks,
		.channels	= 1,	/* 2 */
		.autodma	= AUTODMA,
		.bootable	= ON_BOARD,
	}
};

/**
 *	svwks_init_one	-	called when an OSB/CSB is found
 *	@dev: the svwks device
 *	@id: the matching pci id
 *
 *	Called when the PCI registration layer (or the IDE initialization)
 *	finds a device matching our IDE device tables.
 */
static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	ide_pci_device_t *d = &serverworks_chipsets[id->driver_data];

	return d->init_setup(dev, d);
}

static struct pci_device_id svwks_pci_tbl[] = {
	{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE,   PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE,   PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE,   PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2,  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
	{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, svwks_pci_tbl);

static struct pci_driver driver = {
	.name		= "Serverworks_IDE",
	.id_table	= svwks_pci_tbl,
	.probe		= svwks_init_one,
};

static int __init svwks_ide_init(void)
{
	return ide_pci_register_driver(&driver);
}

module_init(svwks_ide_init);

MODULE_AUTHOR("Michel Aubry, Andrzej Krzysztofowicz, Andre Hedrick");
MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE");
MODULE_LICENSE("GPL");