/* via-core.c */
  1. /*
  2. * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
  3. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
  4. * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
  5. */
  6. /*
  7. * Core code for the Via multifunction framebuffer device.
  8. */
  9. #include <linux/via-core.h>
  10. #include <linux/via_i2c.h>
  11. #include <linux/via-gpio.h>
  12. #include "global.h"
  13. #include <linux/module.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/platform_device.h>
/*
 * The default port config.  Each entry gives the port's function
 * (I2C vs. GPIO), its initial mode, the register set it lives in
 * (VIASR here) and the port's register offset.  The all-zero entry
 * terminates the table.
 */
static struct via_port_cfg adap_configs[] = {
	[VIA_PORT_26]	= { VIA_PORT_I2C,  VIA_MODE_OFF,  VIASR, 0x26 },
	[VIA_PORT_31]	= { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x31 },
	[VIA_PORT_25]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
	[VIA_PORT_2C]	= { VIA_PORT_GPIO, VIA_MODE_I2C,  VIASR, 0x2c },
	[VIA_PORT_3D]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
	{ 0, 0, 0, 0 }	/* terminator */
};
/*
 * We currently only support one viafb device (will there ever be
 * more than one?), so just declare it globally here.  Filled in by
 * via_pci_probe() and shared with the subdevices via platform_data.
 */
static struct viafb_dev global_dev;
/*
 * Basic register access; spinlock (global_dev.reg_lock) required.
 */
/* Write a 32-bit value to an engine MMIO register at offset "reg". */
static inline void viafb_mmio_write(int reg, u32 v)
{
	iowrite32(v, global_dev.engine_mmio + reg);
}
/* Read an engine MMIO register at offset "reg". */
static inline int viafb_mmio_read(int reg)
{
	return ioread32(global_dev.engine_mmio + reg);
}
  43. /* ---------------------------------------------------------------------- */
  44. /*
  45. * Interrupt management. We have a single IRQ line for a lot of
  46. * different functions, so we need to share it. The design here
  47. * is that we don't want to reimplement the shared IRQ code here;
  48. * we also want to avoid having contention for a single handler thread.
  49. * So each subdev driver which needs interrupts just requests
  50. * them directly from the kernel. We just have what's needed for
  51. * overall access to the interrupt control register.
  52. */
/*
 * Which interrupts are enabled now?  Mirrors the VDE_INTERRUPT
 * register contents (minus the master enable bit).
 */
static u32 viafb_enabled_ints;

/* Start with all interrupts masked off. */
static void __devinit viafb_int_init(void)
{
	viafb_enabled_ints = 0;
	viafb_mmio_write(VDE_INTERRUPT, 0);
}
  62. /*
  63. * Allow subdevs to ask for specific interrupts to be enabled. These
  64. * functions must be called with reg_lock held
  65. */
  66. void viafb_irq_enable(u32 mask)
  67. {
  68. viafb_enabled_ints |= mask;
  69. viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE);
  70. }
  71. EXPORT_SYMBOL_GPL(viafb_irq_enable);
  72. void viafb_irq_disable(u32 mask)
  73. {
  74. viafb_enabled_ints &= ~mask;
  75. if (viafb_enabled_ints == 0)
  76. viafb_mmio_write(VDE_INTERRUPT, 0); /* Disable entirely */
  77. else
  78. viafb_mmio_write(VDE_INTERRUPT,
  79. viafb_enabled_ints | VDE_I_ENABLE);
  80. }
  81. EXPORT_SYMBOL_GPL(viafb_irq_disable);
  82. /* ---------------------------------------------------------------------- */
  83. /*
  84. * Access to the DMA engine. This currently provides what the camera
  85. * driver needs (i.e. outgoing only) but is easily expandable if need
  86. * be.
  87. */
/*
 * There are four DMA channels in the vx855.  For now, we only
 * use one of them, though.  Most of the time, the DMA channel
 * will be idle, so we keep the IRQ handler unregistered except
 * when some subsystem has indicated an interest.
 */
/* Number of active viafb_request_dma() users. */
static int viafb_dma_users;
/* Signalled by viafb_dma_irq() when a transfer completes. */
static DECLARE_COMPLETION(viafb_dma_completion);
/*
 * This mutex protects viafb_dma_users and our global interrupt
 * registration state; it also serializes access to the DMA
 * engine.
 */
static DEFINE_MUTEX(viafb_dma_lock);
/*
 * The VX855 DMA descriptor (used for s/g transfers) looks
 * like this.  Descriptors are chained through next_desc_low/high
 * and must be 64 bytes each (hence the pad field).
 */
struct viafb_vx855_dma_descr {
	u32	addr_low;	/* Low part of phys addr */
	u32	addr_high;	/* High 12 bits of addr */
	u32	fb_offset;	/* Offset into FB memory */
	u32	seg_size;	/* Size, 16-byte units */
	u32	tile_mode;	/* "tile mode" setting */
	u32	next_desc_low;	/* Next descriptor addr */
	u32	next_desc_high;
	u32	pad;		/* Fill out to 64 bytes */
};

/*
 * Flags added to the "next descriptor low" pointers
 */
#define VIAFB_DMA_MAGIC		0x01  /* ??? Just has to be there */
#define VIAFB_DMA_FINAL_SEGMENT	0x02  /* Final segment */
  121. /*
  122. * The completion IRQ handler.
  123. */
  124. static irqreturn_t viafb_dma_irq(int irq, void *data)
  125. {
  126. int csr;
  127. irqreturn_t ret = IRQ_NONE;
  128. spin_lock(&global_dev.reg_lock);
  129. csr = viafb_mmio_read(VDMA_CSR0);
  130. if (csr & VDMA_C_DONE) {
  131. viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
  132. complete(&viafb_dma_completion);
  133. ret = IRQ_HANDLED;
  134. }
  135. spin_unlock(&global_dev.reg_lock);
  136. return ret;
  137. }
  138. /*
  139. * Indicate a need for DMA functionality.
  140. */
  141. int viafb_request_dma(void)
  142. {
  143. int ret = 0;
  144. /*
  145. * Only VX855 is supported currently.
  146. */
  147. if (global_dev.chip_type != UNICHROME_VX855)
  148. return -ENODEV;
  149. /*
  150. * Note the new user and set up our interrupt handler
  151. * if need be.
  152. */
  153. mutex_lock(&viafb_dma_lock);
  154. viafb_dma_users++;
  155. if (viafb_dma_users == 1) {
  156. ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
  157. IRQF_SHARED, "via-dma", &viafb_dma_users);
  158. if (ret)
  159. viafb_dma_users--;
  160. else
  161. viafb_irq_enable(VDE_I_DMA0TDEN);
  162. }
  163. mutex_unlock(&viafb_dma_lock);
  164. return ret;
  165. }
  166. EXPORT_SYMBOL_GPL(viafb_request_dma);
  167. void viafb_release_dma(void)
  168. {
  169. mutex_lock(&viafb_dma_lock);
  170. viafb_dma_users--;
  171. if (viafb_dma_users == 0) {
  172. viafb_irq_disable(VDE_I_DMA0TDEN);
  173. free_irq(global_dev.pdev->irq, &viafb_dma_users);
  174. }
  175. mutex_unlock(&viafb_dma_lock);
  176. }
  177. EXPORT_SYMBOL_GPL(viafb_release_dma);
#if 0
/*
 * Copy a single buffer from FB memory, synchronously.  This code works
 * but is not currently used (hence the #if 0).  "offset" is the source
 * offset into FB memory, "paddr" the destination physical address, and
 * "len" the byte count (programmed in 16-byte quadword units).
 */
void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len)
{
	unsigned long flags;
	int csr;

	mutex_lock(&viafb_dma_lock);
	init_completion(&viafb_dma_completion);
	/*
	 * Program the controller.
	 */
	spin_lock_irqsave(&global_dev.reg_lock, flags);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
	/* Enable ints; must happen after CSR0 write! */
	viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE);
	viafb_mmio_write(VDMA_MARL0, (int) (paddr & 0xfffffff0));
	viafb_mmio_write(VDMA_MARH0, (int) ((paddr >> 28) & 0xfff));
	/* Data sheet suggests DAR0 should be <<4, but it lies */
	viafb_mmio_write(VDMA_DAR0, offset);
	viafb_mmio_write(VDMA_DQWCR0, len >> 4);
	viafb_mmio_write(VDMA_TMR0, 0);
	viafb_mmio_write(VDMA_DPRL0, 0);
	viafb_mmio_write(VDMA_DPRH0, 0);
	viafb_mmio_write(VDMA_PMR0, 0);
	csr = viafb_mmio_read(VDMA_CSR0);	/* flush before starting */
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
	/*
	 * Now we just wait until the interrupt handler says
	 * we're done.
	 */
	wait_for_completion_interruptible(&viafb_dma_completion);
	viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
	mutex_unlock(&viafb_dma_lock);
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out);
#endif
  218. /*
  219. * Do a scatter/gather DMA copy from FB memory. You must have done
  220. * a successful call to viafb_request_dma() first.
  221. */
/*
 * Do a scatter/gather DMA copy from FB memory.  You must have done
 * a successful call to viafb_request_dma() first.
 *
 * "offset" is the starting offset into FB memory, "sg"/"nsg" describe
 * the (already DMA-mapped) destination pages.  Returns 0 on success
 * or -ENOMEM if the descriptor array cannot be allocated.
 */
int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
{
	struct viafb_vx855_dma_descr *descr;
	void *descrpages;
	dma_addr_t descr_handle;
	unsigned long flags;
	int i;
	struct scatterlist *sgentry;
	dma_addr_t nextdesc;

	/*
	 * Get a place to put the descriptors: one per segment, in
	 * coherent memory so the engine sees our writes.
	 */
	descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
			nsg*sizeof(struct viafb_vx855_dma_descr),
			&descr_handle, GFP_KERNEL);
	if (descrpages == NULL) {
		dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
		return -ENOMEM;
	}
	mutex_lock(&viafb_dma_lock);
	/*
	 * Fill them in.  Each descriptor's next_desc pointer is the
	 * bus address of the following descriptor plus the magic bit.
	 */
	descr = descrpages;
	nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
	for_each_sg(sg, sgentry, nsg, i) {
		dma_addr_t paddr = sg_dma_address(sgentry);
		descr->addr_low = paddr & 0xfffffff0;
		descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
		descr->fb_offset = offset;
		descr->seg_size = sg_dma_len(sgentry) >> 4; /* 16-byte units */
		descr->tile_mode = 0;
		descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
		descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
		descr->pad = 0xffffffff;  /* VIA driver does this */
		offset += sg_dma_len(sgentry);
		nextdesc += sizeof(struct viafb_vx855_dma_descr);
		descr++;
	}
	/* Mark the last descriptor as the end of the chain. */
	descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC;
	/*
	 * Program the engine.  Register write order matters here;
	 * see viafb_dma_copy_out() above for the same sequence.
	 */
	spin_lock_irqsave(&global_dev.reg_lock, flags);
	init_completion(&viafb_dma_completion);
	viafb_mmio_write(VDMA_DQWCR0, 0);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
	viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
	viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
	viafb_mmio_write(VDMA_DPRH0,
			(((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
	(void) viafb_mmio_read(VDMA_CSR0);	/* flush before starting */
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
	/*
	 * Now we just wait until the interrupt handler says
	 * we're done.  Except that, actually, we need to wait a little
	 * longer: the interrupts seem to jump the gun a little and we
	 * get corrupted frames sometimes.
	 */
	wait_for_completion_timeout(&viafb_dma_completion, 1);
	msleep(1);
	if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
		printk(KERN_ERR "VIA DMA timeout!\n");
	/*
	 * Clean up and we're done.
	 */
	viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
	viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
	mutex_unlock(&viafb_dma_lock);
	dma_free_coherent(&global_dev.pdev->dev,
			nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
			descr_handle);
	return 0;
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
  298. /* ---------------------------------------------------------------------- */
  299. /*
  300. * Figure out how big our framebuffer memory is. Kind of ugly,
  301. * but evidently we can't trust the information found in the
  302. * fbdev configuration area.
  303. */
/*
 * PCI device IDs of the "function 3" device in each supported
 * chipset; scanned by viafb_get_fb_size_from_pci() below.
 */
static u16 via_function3[] = {
	CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
	CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
	P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3,
};
/* Get the BIOS-configured framebuffer size from PCI configuration space
 * of function 3 in the respective chipset.  Returns the size in bytes,
 * or -EIO if no known function-3 device was found. */
static int viafb_get_fb_size_from_pci(int chip_type)
{
	int i;
	u8 offset = 0;
	u32 FBSize;
	u32 VideoMemSize;

	/* search for the "FUNCTION3" device in this chipset */
	for (i = 0; i < ARRAY_SIZE(via_function3); i++) {
		struct pci_dev *pdev;

		pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i],
				      NULL);
		if (!pdev)
			continue;
		DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device);
		/* Pick the config-space offset of the size register. */
		switch (pdev->device) {
		case CLE266_FUNCTION3:
		case KM400_FUNCTION3:
			offset = 0xE0;
			break;
		case CN400_FUNCTION3:
		case CN700_FUNCTION3:
		case CX700_FUNCTION3:
		case KM800_FUNCTION3:
		case KM890_FUNCTION3:
		case P4M890_FUNCTION3:
		case P4M900_FUNCTION3:
		case VX800_FUNCTION3:
		case VX855_FUNCTION3:
		/*case CN750_FUNCTION3: */
			offset = 0xA0;
			break;
		}
		if (!offset)
			break;
		/*
		 * NOTE(review): the read status is ignored and the loop
		 * keeps scanning the remaining IDs; the last successful
		 * read wins.  FBSize is only consumed below when offset
		 * is nonzero, i.e. after at least one read — confirm a
		 * failed config read cannot leave it uninitialized.
		 */
		pci_read_config_dword(pdev, offset, &FBSize);
		pci_dev_put(pdev);
	}
	if (!offset) {
		printk(KERN_ERR "cannot determine framebuffer size\n");
		return -EIO;
	}
	/* Bits 12-14 encode the memory size. */
	FBSize = FBSize & 0x00007000;
	DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);
	if (chip_type < UNICHROME_CX700) {
		/* Older chips use a 3-value encoding. */
		switch (FBSize) {
		case 0x00004000:
			VideoMemSize = (16 << 20);	/*16M */
			break;
		case 0x00005000:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		case 0x00006000:
			VideoMemSize = (64 << 20);	/*64M */
			break;
		default:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		}
	} else {
		/* CX700 and newer encode 8M through 512M. */
		switch (FBSize) {
		case 0x00001000:
			VideoMemSize = (8 << 20);	/*8M */
			break;
		case 0x00002000:
			VideoMemSize = (16 << 20);	/*16M */
			break;
		case 0x00003000:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		case 0x00004000:
			VideoMemSize = (64 << 20);	/*64M */
			break;
		case 0x00005000:
			VideoMemSize = (128 << 20);	/*128M */
			break;
		case 0x00006000:
			VideoMemSize = (256 << 20);	/*256M */
			break;
		case 0x00007000:	/* Only on VX855/875 */
			VideoMemSize = (512 << 20);	/*512M */
			break;
		default:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		}
	}
	return VideoMemSize;
}
  399. /*
  400. * Figure out and map our MMIO regions.
  401. */
  402. static int __devinit via_pci_setup_mmio(struct viafb_dev *vdev)
  403. {
  404. int ret;
  405. /*
  406. * Hook up to the device registers. Note that we soldier
  407. * on if it fails; the framebuffer can operate (without
  408. * acceleration) without this region.
  409. */
  410. vdev->engine_start = pci_resource_start(vdev->pdev, 1);
  411. vdev->engine_len = pci_resource_len(vdev->pdev, 1);
  412. vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
  413. vdev->engine_len);
  414. if (vdev->engine_mmio == NULL)
  415. dev_err(&vdev->pdev->dev,
  416. "Unable to map engine MMIO; operation will be "
  417. "slow and crippled.\n");
  418. /*
  419. * Map in framebuffer memory. For now, failure here is
  420. * fatal. Unfortunately, in the absence of significant
  421. * vmalloc space, failure here is also entirely plausible.
  422. * Eventually we want to move away from mapping this
  423. * entire region.
  424. */
  425. vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
  426. ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
  427. if (ret < 0)
  428. goto out_unmap;
  429. vdev->fbmem = ioremap_nocache(vdev->fbmem_start, vdev->fbmem_len);
  430. if (vdev->fbmem == NULL) {
  431. ret = -ENOMEM;
  432. goto out_unmap;
  433. }
  434. return 0;
  435. out_unmap:
  436. iounmap(vdev->engine_mmio);
  437. return ret;
  438. }
/* Undo via_pci_setup_mmio(): unmap both MMIO regions. */
static void via_pci_teardown_mmio(struct viafb_dev *vdev)
{
	iounmap(vdev->fbmem);
	iounmap(vdev->engine_mmio);
}
/*
 * Create our subsidiary devices.  Each entry names a platform
 * device to create; platdev records the created device (or NULL
 * if creation failed) so teardown knows what to unregister.
 */
static struct viafb_subdev_info {
	char *name;
	struct platform_device *platdev;
} viafb_subdevs[] = {
	{
		.name = "viafb-gpio",
	},
	{
		.name = "viafb-i2c",
	}
};
#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)
  459. static int __devinit via_create_subdev(struct viafb_dev *vdev,
  460. struct viafb_subdev_info *info)
  461. {
  462. int ret;
  463. info->platdev = platform_device_alloc(info->name, -1);
  464. if (!info->platdev) {
  465. dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n",
  466. info->name);
  467. return -ENOMEM;
  468. }
  469. info->platdev->dev.parent = &vdev->pdev->dev;
  470. info->platdev->dev.platform_data = vdev;
  471. ret = platform_device_add(info->platdev);
  472. if (ret) {
  473. dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n",
  474. info->name);
  475. platform_device_put(info->platdev);
  476. info->platdev = NULL;
  477. }
  478. return ret;
  479. }
  480. static int __devinit via_setup_subdevs(struct viafb_dev *vdev)
  481. {
  482. int i;
  483. /*
  484. * Ignore return values. Even if some of the devices
  485. * fail to be created, we'll still be able to use some
  486. * of the rest.
  487. */
  488. for (i = 0; i < N_SUBDEVS; i++)
  489. via_create_subdev(vdev, viafb_subdevs + i);
  490. return 0;
  491. }
  492. static void via_teardown_subdevs(void)
  493. {
  494. int i;
  495. for (i = 0; i < N_SUBDEVS; i++)
  496. if (viafb_subdevs[i].platdev) {
  497. viafb_subdevs[i].platdev->dev.platform_data = NULL;
  498. platform_device_unregister(viafb_subdevs[i].platdev);
  499. }
  500. }
/*
 * PCI probe: bring up the device, map its regions, initialize the
 * interrupt state, create the subdevices, and register the
 * framebuffer.  Teardown on failure runs in reverse order.
 */
static int __devinit via_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	/*
	 * Global device initialization.  driver_data carries the
	 * chip type from via_pci_table.
	 */
	memset(&global_dev, 0, sizeof(global_dev));
	global_dev.pdev = pdev;
	global_dev.chip_type = ent->driver_data;
	global_dev.port_cfg = adap_configs;
	spin_lock_init(&global_dev.reg_lock);
	ret = via_pci_setup_mmio(&global_dev);
	if (ret)
		goto out_disable;
	/*
	 * Set up interrupts and create our subdevices.  Continue even if
	 * some things fail.
	 */
	viafb_int_init();
	via_setup_subdevs(&global_dev);
	/*
	 * Set up the framebuffer device
	 */
	ret = via_fb_pci_probe(&global_dev);
	if (ret)
		goto out_subdevs;
	return 0;

out_subdevs:
	via_teardown_subdevs();
	via_pci_teardown_mmio(&global_dev);
out_disable:
	pci_disable_device(pdev);
	return ret;
}
/* PCI remove: tear everything down in the reverse of probe order. */
static void __devexit via_pci_remove(struct pci_dev *pdev)
{
	via_teardown_subdevs();
	via_fb_pci_remove(pdev);
	via_pci_teardown_mmio(&global_dev);
	pci_disable_device(pdev);
}
/*
 * The chipsets we claim; driver_data carries the UNICHROME_* chip
 * type consumed by via_pci_probe().
 */
static struct pci_device_id via_pci_table[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
	  .driver_data = UNICHROME_CLE266 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
	  .driver_data = UNICHROME_K400 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
	  .driver_data = UNICHROME_K800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
	  .driver_data = UNICHROME_PM800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN700_DID),
	  .driver_data = UNICHROME_CN700 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
	  .driver_data = UNICHROME_CX700 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
	  .driver_data = UNICHROME_CN750 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
	  .driver_data = UNICHROME_K8M890 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
	  .driver_data = UNICHROME_P4M890 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
	  .driver_data = UNICHROME_P4M900 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
	  .driver_data = UNICHROME_VX800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
	  .driver_data = UNICHROME_VX855 },
	{ }
};
MODULE_DEVICE_TABLE(pci, via_pci_table);
/* Our PCI driver glue. */
static struct pci_driver via_driver = {
	.name		= "viafb",
	.id_table	= via_pci_table,
	.probe		= via_pci_probe,
	.remove		= __devexit_p(via_pci_remove),
};
  580. static int __init via_core_init(void)
  581. {
  582. int ret;
  583. ret = viafb_init();
  584. if (ret)
  585. return ret;
  586. viafb_i2c_init();
  587. viafb_gpio_init();
  588. return pci_register_driver(&via_driver);
  589. }
/* Module exit: undo via_core_init() in reverse order. */
static void __exit via_core_exit(void)
{
	pci_unregister_driver(&via_driver);
	viafb_gpio_exit();
	viafb_i2c_exit();
	viafb_exit();
}

module_init(via_core_init);
module_exit(via_core_exit);