via-core.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682
  1. /*
  2. * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
  3. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
  4. * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
  5. */
  6. /*
  7. * Core code for the Via multifunction framebuffer device.
  8. */
  9. #include <linux/via-core.h>
  10. #include <linux/via_i2c.h>
  11. #include <linux/via-gpio.h>
  12. #include "global.h"
  13. #include <linux/module.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/platform_device.h>
  16. /*
  17. * The default port config.
  18. */
/*
 * Default per-port configuration: port kind (I2C vs. GPIO), initial
 * mode, the register set it lives in (VIASR) and the index within it.
 * Consumed through global_dev.port_cfg by the i2c/gpio subdevices.
 */
static struct via_port_cfg adap_configs[] = {
	[VIA_PORT_26] = { VIA_PORT_I2C, VIA_MODE_OFF, VIASR, 0x26 },
	[VIA_PORT_31] = { VIA_PORT_I2C, VIA_MODE_I2C, VIASR, 0x31 },
	[VIA_PORT_25] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
	[VIA_PORT_2C] = { VIA_PORT_GPIO, VIA_MODE_I2C, VIASR, 0x2c },
	[VIA_PORT_3D] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
	{ 0, 0, 0, 0 }	/* terminator */
};
/*
 * We currently only support one viafb device (will there ever be
 * more than one?), so just declare it globally here.  All state
 * shared between this core and its subdevices lives in this struct.
 */
static struct viafb_dev global_dev;
/*
 * Basic register access; reg_lock (spinlock) must be held by the
 * caller so read-modify-write sequences on engine registers are atomic.
 */
static inline void viafb_mmio_write(int reg, u32 v)
{
	iowrite32(v, global_dev.engine_mmio + reg);
}
/* Read a 32-bit engine register; same locking rules as viafb_mmio_write(). */
static inline int viafb_mmio_read(int reg)
{
	return ioread32(global_dev.engine_mmio + reg);
}
  43. /* ---------------------------------------------------------------------- */
  44. /*
  45. * Interrupt management. We have a single IRQ line for a lot of
  46. * different functions, so we need to share it. The design here
  47. * is that we don't want to reimplement the shared IRQ code here;
  48. * we also want to avoid having contention for a single handler thread.
  49. * So each subdev driver which needs interrupts just requests
  50. * them directly from the kernel. We just have what's needed for
  51. * overall access to the interrupt control register.
  52. */
/*
 * Which interrupts are enabled now?  Cached copy of the mask we last
 * wrote to VDE_INTERRUPT; protected by reg_lock.
 */
static u32 viafb_enabled_ints;

/* Reset interrupt state: nothing enabled, control register cleared. */
static void __devinit viafb_int_init(void)
{
	viafb_enabled_ints = 0;
	viafb_mmio_write(VDE_INTERRUPT, 0);
}
/*
 * Allow subdevs to ask for specific interrupts to be enabled.  These
 * functions must be called with reg_lock held.
 */
void viafb_irq_enable(u32 mask)
{
	viafb_enabled_ints |= mask;
	/* Master enable bit must accompany any per-source bits */
	viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE);
}
EXPORT_SYMBOL_GPL(viafb_irq_enable);
  72. void viafb_irq_disable(u32 mask)
  73. {
  74. viafb_enabled_ints &= ~mask;
  75. if (viafb_enabled_ints == 0)
  76. viafb_mmio_write(VDE_INTERRUPT, 0); /* Disable entirely */
  77. else
  78. viafb_mmio_write(VDE_INTERRUPT,
  79. viafb_enabled_ints | VDE_I_ENABLE);
  80. }
  81. EXPORT_SYMBOL_GPL(viafb_irq_disable);
  82. /* ---------------------------------------------------------------------- */
  83. /*
  84. * Currently, the camera driver is the only user of the DMA code, so we
  85. * only compile it in if the camera driver is being built. Chances are,
  86. * most viafb systems will not need to have this extra code for a while.
  87. * As soon as another user comes long, the ifdef can be removed.
  88. */
  89. #if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
  90. /*
  91. * Access to the DMA engine. This currently provides what the camera
  92. * driver needs (i.e. outgoing only) but is easily expandable if need
  93. * be.
  94. */
  95. /*
  96. * There are four DMA channels in the vx855. For now, we only
  97. * use one of them, though. Most of the time, the DMA channel
  98. * will be idle, so we keep the IRQ handler unregistered except
  99. * when some subsystem has indicated an interest.
  100. */
/* Number of current users of the DMA facility (see viafb_request_dma()). */
static int viafb_dma_users;
/* Signalled by the IRQ handler when a DMA transfer completes. */
static DECLARE_COMPLETION(viafb_dma_completion);
/*
 * This mutex protects viafb_dma_users and our global interrupt
 * registration state; it also serializes access to the DMA
 * engine.
 */
static DEFINE_MUTEX(viafb_dma_lock);
/*
 * The VX855 DMA descriptor (used for s/g transfers) looks
 * like this.  Hardware requires 16-byte-aligned addresses, hence
 * the low four bits of the address fields carry flags instead.
 */
struct viafb_vx855_dma_descr {
	u32	addr_low;	/* Low part of phys addr */
	u32	addr_high;	/* High 12 bits of addr */
	u32	fb_offset;	/* Offset into FB memory */
	u32	seg_size;	/* Size, 16-byte units */
	u32	tile_mode;	/* "tile mode" setting */
	u32	next_desc_low;	/* Next descriptor addr + VIAFB_DMA_* flags */
	u32	next_desc_high;
	u32	pad;		/* Fill out to 64 bytes */
};
/*
 * Flags added to the "next descriptor low" pointers
 */
#define VIAFB_DMA_MAGIC		0x01  /* ??? Just has to be there */
#define VIAFB_DMA_FINAL_SEGMENT	0x02  /* Final segment */
/*
 * The completion IRQ handler.  The IRQ line is shared, so we must
 * check whether our channel actually raised it; if not, report
 * IRQ_NONE so other handlers get a chance.
 */
static irqreturn_t viafb_dma_irq(int irq, void *data)
{
	int csr;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&global_dev.reg_lock);
	csr = viafb_mmio_read(VDMA_CSR0);
	if (csr & VDMA_C_DONE) {
		/* Write-one-to-clear the done bit, then wake the waiter */
		viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
		complete(&viafb_dma_completion);
		ret = IRQ_HANDLED;
	}
	spin_unlock(&global_dev.reg_lock);
	return ret;
}
  145. /*
  146. * Indicate a need for DMA functionality.
  147. */
  148. int viafb_request_dma(void)
  149. {
  150. int ret = 0;
  151. /*
  152. * Only VX855 is supported currently.
  153. */
  154. if (global_dev.chip_type != UNICHROME_VX855)
  155. return -ENODEV;
  156. /*
  157. * Note the new user and set up our interrupt handler
  158. * if need be.
  159. */
  160. mutex_lock(&viafb_dma_lock);
  161. viafb_dma_users++;
  162. if (viafb_dma_users == 1) {
  163. ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
  164. IRQF_SHARED, "via-dma", &viafb_dma_users);
  165. if (ret)
  166. viafb_dma_users--;
  167. else
  168. viafb_irq_enable(VDE_I_DMA0TDEN);
  169. }
  170. mutex_unlock(&viafb_dma_lock);
  171. return ret;
  172. }
  173. EXPORT_SYMBOL_GPL(viafb_request_dma);
  174. void viafb_release_dma(void)
  175. {
  176. mutex_lock(&viafb_dma_lock);
  177. viafb_dma_users--;
  178. if (viafb_dma_users == 0) {
  179. viafb_irq_disable(VDE_I_DMA0TDEN);
  180. free_irq(global_dev.pdev->irq, &viafb_dma_users);
  181. }
  182. mutex_unlock(&viafb_dma_lock);
  183. }
  184. EXPORT_SYMBOL_GPL(viafb_release_dma);
#if 0
/*
 * Copy a single buffer from FB memory, synchronously.  This code works
 * but is not currently used.  NOTE(review): register writes below are
 * order-sensitive per the VX855 DMA engine; do not reorder.
 */
void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len)
{
	unsigned long flags;
	int csr;

	mutex_lock(&viafb_dma_lock);
	init_completion(&viafb_dma_completion);
	/*
	 * Program the controller.
	 */
	spin_lock_irqsave(&global_dev.reg_lock, flags);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
	/* Enable ints; must happen after CSR0 write! */
	viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE);
	/* Memory address: low bits must be 16-byte aligned */
	viafb_mmio_write(VDMA_MARL0, (int) (paddr & 0xfffffff0));
	viafb_mmio_write(VDMA_MARH0, (int) ((paddr >> 28) & 0xfff));
	/* Data sheet suggests DAR0 should be <<4, but it lies */
	viafb_mmio_write(VDMA_DAR0, offset);
	/* Transfer length in 16-byte quadword units */
	viafb_mmio_write(VDMA_DQWCR0, len >> 4);
	viafb_mmio_write(VDMA_TMR0, 0);
	viafb_mmio_write(VDMA_DPRL0, 0);
	viafb_mmio_write(VDMA_DPRH0, 0);
	viafb_mmio_write(VDMA_PMR0, 0);
	csr = viafb_mmio_read(VDMA_CSR0);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
	/*
	 * Now we just wait until the interrupt handler says
	 * we're done.
	 */
	wait_for_completion_interruptible(&viafb_dma_completion);
	viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
	mutex_unlock(&viafb_dma_lock);
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out);
#endif
/*
 * Do a scatter/gather DMA copy from FB memory.  You must have done
 * a successful call to viafb_request_dma() first.
 *
 * @offset: starting offset into framebuffer memory
 * @sg:     DMA-mapped scatterlist describing the destination pages
 * @nsg:    number of entries in @sg
 *
 * Returns 0 on success or -ENOMEM if the descriptor array cannot
 * be allocated.  Sleeps; may not be called from atomic context.
 */
int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
{
	struct viafb_vx855_dma_descr *descr;
	void *descrpages;
	dma_addr_t descr_handle;
	unsigned long flags;
	int i;
	struct scatterlist *sgentry;
	dma_addr_t nextdesc;

	/*
	 * Get a place to put the descriptors.  One 64-byte descriptor
	 * per scatterlist entry, in coherent memory so the engine sees
	 * our writes.
	 */
	descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
			nsg*sizeof(struct viafb_vx855_dma_descr),
			&descr_handle, GFP_KERNEL);
	if (descrpages == NULL) {
		dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
		return -ENOMEM;
	}
	mutex_lock(&viafb_dma_lock);
	/*
	 * Fill them in.  Each descriptor's next_desc pointer links to
	 * the following one; the hardware needs VIAFB_DMA_MAGIC set in
	 * the low bits of every link.
	 */
	descr = descrpages;
	nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
	for_each_sg(sg, sgentry, nsg, i) {
		dma_addr_t paddr = sg_dma_address(sgentry);
		descr->addr_low = paddr & 0xfffffff0;
		descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
		descr->fb_offset = offset;
		/* Segment size is expressed in 16-byte units */
		descr->seg_size = sg_dma_len(sgentry) >> 4;
		descr->tile_mode = 0;
		descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
		descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
		descr->pad = 0xffffffff;  /* VIA driver does this */
		offset += sg_dma_len(sgentry);
		nextdesc += sizeof(struct viafb_vx855_dma_descr);
		descr++;
	}
	/* Terminate the chain on the last descriptor */
	descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC;
	/*
	 * Program the engine.  The write ordering below matters; see
	 * the comments in viafb_dma_copy_out() above.
	 */
	spin_lock_irqsave(&global_dev.reg_lock, flags);
	init_completion(&viafb_dma_completion);
	viafb_mmio_write(VDMA_DQWCR0, 0);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
	viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
	viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
	viafb_mmio_write(VDMA_DPRH0,
			(((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
	(void) viafb_mmio_read(VDMA_CSR0);	/* flush posted writes */
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
	/*
	 * Now we just wait until the interrupt handler says
	 * we're done.  Except that, actually, we need to wait a little
	 * longer: the interrupts seem to jump the gun a little and we
	 * get corrupted frames sometimes.
	 */
	wait_for_completion_timeout(&viafb_dma_completion, 1);
	msleep(1);
	if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
		printk(KERN_ERR "VIA DMA timeout!\n");
	/*
	 * Clean up and we're done.
	 */
	viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
	viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
	mutex_unlock(&viafb_dma_lock);
	dma_free_coherent(&global_dev.pdev->dev,
			nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
			descr_handle);
	return 0;
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
  305. #endif /* CONFIG_VIDEO_VIA_CAMERA */
  306. /* ---------------------------------------------------------------------- */
  307. /*
  308. * Figure out how big our framebuffer memory is. Kind of ugly,
  309. * but evidently we can't trust the information found in the
  310. * fbdev configuration area.
  311. */
/* PCI device IDs of the "function 3" host-bridge device on each chipset */
static u16 via_function3[] = {
	CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
	CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
	P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3,
};
/* Get the BIOS-configured framebuffer size from PCI configuration space
 * of function 3 in the respective chipset.
 *
 * Returns the framebuffer size in bytes, or -EIO if no known
 * function-3 device could be located.
 */
static int viafb_get_fb_size_from_pci(int chip_type)
{
	int i;
	u8 offset = 0;
	u32 FBSize;
	u32 VideoMemSize;

	/* search for the "FUNCTION3" device in this chipset */
	for (i = 0; i < ARRAY_SIZE(via_function3); i++) {
		struct pci_dev *pdev;

		pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i],
				      NULL);
		if (!pdev)
			continue;
		DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device);
		/* The config-space offset of the size register varies
		 * by chipset generation. */
		switch (pdev->device) {
		case CLE266_FUNCTION3:
		case KM400_FUNCTION3:
			offset = 0xE0;
			break;
		case CN400_FUNCTION3:
		case CN700_FUNCTION3:
		case CX700_FUNCTION3:
		case KM800_FUNCTION3:
		case KM890_FUNCTION3:
		case P4M890_FUNCTION3:
		case P4M900_FUNCTION3:
		case VX800_FUNCTION3:
		case VX855_FUNCTION3:
		/*case CN750_FUNCTION3: */
			offset = 0xA0;
			break;
		}
		if (!offset)
			break;
		pci_read_config_dword(pdev, offset, &FBSize);
		pci_dev_put(pdev);
	}
	if (!offset) {
		printk(KERN_ERR "cannot determine framebuffer size\n");
		return -EIO;
	}
	/* Bits 12-14 encode the configured size */
	FBSize = FBSize & 0x00007000;
	DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);
	if (chip_type < UNICHROME_CX700) {
		/* Older chips: 16M/32M/64M encodings only */
		switch (FBSize) {
		case 0x00004000:
			VideoMemSize = (16 << 20);	/*16M */
			break;
		case 0x00005000:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		case 0x00006000:
			VideoMemSize = (64 << 20);	/*64M */
			break;
		default:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		}
	} else {
		/* CX700 and later use a finer-grained encoding */
		switch (FBSize) {
		case 0x00001000:
			VideoMemSize = (8 << 20);	/*8M */
			break;
		case 0x00002000:
			VideoMemSize = (16 << 20);	/*16M */
			break;
		case 0x00003000:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		case 0x00004000:
			VideoMemSize = (64 << 20);	/*64M */
			break;
		case 0x00005000:
			VideoMemSize = (128 << 20);	/*128M */
			break;
		case 0x00006000:
			VideoMemSize = (256 << 20);	/*256M */
			break;
		case 0x00007000:	/* Only on VX855/875 */
			VideoMemSize = (512 << 20);	/*512M */
			break;
		default:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		}
	}
	return VideoMemSize;
}
  407. /*
  408. * Figure out and map our MMIO regions.
  409. */
  410. static int __devinit via_pci_setup_mmio(struct viafb_dev *vdev)
  411. {
  412. int ret;
  413. /*
  414. * Hook up to the device registers. Note that we soldier
  415. * on if it fails; the framebuffer can operate (without
  416. * acceleration) without this region.
  417. */
  418. vdev->engine_start = pci_resource_start(vdev->pdev, 1);
  419. vdev->engine_len = pci_resource_len(vdev->pdev, 1);
  420. vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
  421. vdev->engine_len);
  422. if (vdev->engine_mmio == NULL)
  423. dev_err(&vdev->pdev->dev,
  424. "Unable to map engine MMIO; operation will be "
  425. "slow and crippled.\n");
  426. /*
  427. * Map in framebuffer memory. For now, failure here is
  428. * fatal. Unfortunately, in the absence of significant
  429. * vmalloc space, failure here is also entirely plausible.
  430. * Eventually we want to move away from mapping this
  431. * entire region.
  432. */
  433. vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
  434. ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
  435. if (ret < 0)
  436. goto out_unmap;
  437. vdev->fbmem = ioremap_nocache(vdev->fbmem_start, vdev->fbmem_len);
  438. if (vdev->fbmem == NULL) {
  439. ret = -ENOMEM;
  440. goto out_unmap;
  441. }
  442. return 0;
  443. out_unmap:
  444. iounmap(vdev->engine_mmio);
  445. return ret;
  446. }
/* Undo via_pci_setup_mmio(); iounmap() tolerates a NULL engine_mmio. */
static void via_pci_teardown_mmio(struct viafb_dev *vdev)
{
	iounmap(vdev->fbmem);
	iounmap(vdev->engine_mmio);
}
/*
 * Create our subsidiary devices.  Each entry names a platform device
 * we spawn under the PCI device; platdev is filled in at creation
 * time and cleared again on teardown.
 */
static struct viafb_subdev_info {
	char *name;
	struct platform_device *platdev;
} viafb_subdevs[] = {
	{
		.name = "viafb-gpio",
	},
	{
		.name = "viafb-i2c",
	},
#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
	{
		.name = "viafb-camera",
	},
#endif
};
#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)
  472. static int __devinit via_create_subdev(struct viafb_dev *vdev,
  473. struct viafb_subdev_info *info)
  474. {
  475. int ret;
  476. info->platdev = platform_device_alloc(info->name, -1);
  477. if (!info->platdev) {
  478. dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n",
  479. info->name);
  480. return -ENOMEM;
  481. }
  482. info->platdev->dev.parent = &vdev->pdev->dev;
  483. info->platdev->dev.platform_data = vdev;
  484. ret = platform_device_add(info->platdev);
  485. if (ret) {
  486. dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n",
  487. info->name);
  488. platform_device_put(info->platdev);
  489. info->platdev = NULL;
  490. }
  491. return ret;
  492. }
  493. static int __devinit via_setup_subdevs(struct viafb_dev *vdev)
  494. {
  495. int i;
  496. /*
  497. * Ignore return values. Even if some of the devices
  498. * fail to be created, we'll still be able to use some
  499. * of the rest.
  500. */
  501. for (i = 0; i < N_SUBDEVS; i++)
  502. via_create_subdev(vdev, viafb_subdevs + i);
  503. return 0;
  504. }
  505. static void via_teardown_subdevs(void)
  506. {
  507. int i;
  508. for (i = 0; i < N_SUBDEVS; i++)
  509. if (viafb_subdevs[i].platdev) {
  510. viafb_subdevs[i].platdev->dev.platform_data = NULL;
  511. platform_device_unregister(viafb_subdevs[i].platdev);
  512. }
  513. }
/*
 * Probe: bring up the single global viafb device.  Order matters:
 * PCI enable -> global state -> MMIO mapping -> interrupts/subdevices
 * -> framebuffer; the error paths unwind in reverse.
 */
static int __devinit via_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	/*
	 * Global device initialization.
	 */
	memset(&global_dev, 0, sizeof(global_dev));
	global_dev.pdev = pdev;
	global_dev.chip_type = ent->driver_data;
	global_dev.port_cfg = adap_configs;
	spin_lock_init(&global_dev.reg_lock);
	ret = via_pci_setup_mmio(&global_dev);
	if (ret)
		goto out_disable;
	/*
	 * Set up interrupts and create our subdevices.  Continue even if
	 * some things fail.
	 */
	viafb_int_init();
	via_setup_subdevs(&global_dev);
	/*
	 * Set up the framebuffer device
	 */
	ret = via_fb_pci_probe(&global_dev);
	if (ret)
		goto out_subdevs;
	return 0;

out_subdevs:
	via_teardown_subdevs();
	via_pci_teardown_mmio(&global_dev);
out_disable:
	pci_disable_device(pdev);
	return ret;
}
/* Remove: tear everything down in (roughly) reverse probe order. */
static void __devexit via_pci_remove(struct pci_dev *pdev)
{
	via_teardown_subdevs();
	via_fb_pci_remove(pdev);
	via_pci_teardown_mmio(&global_dev);
	pci_disable_device(pdev);
}
/*
 * All UniChrome variants we claim; driver_data carries the chip type
 * consumed by via_pci_probe().
 */
static struct pci_device_id via_pci_table[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
	  .driver_data = UNICHROME_CLE266 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
	  .driver_data = UNICHROME_K400 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
	  .driver_data = UNICHROME_K800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
	  .driver_data = UNICHROME_PM800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN700_DID),
	  .driver_data = UNICHROME_CN700 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
	  .driver_data = UNICHROME_CX700 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
	  .driver_data = UNICHROME_CN750 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
	  .driver_data = UNICHROME_K8M890 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
	  .driver_data = UNICHROME_P4M890 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
	  .driver_data = UNICHROME_P4M900 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
	  .driver_data = UNICHROME_VX800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
	  .driver_data = UNICHROME_VX855 },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, via_pci_table);
/* The PCI driver glue binding probe/remove to the device table above. */
static struct pci_driver via_driver = {
	.name		= "viafb",
	.id_table	= via_pci_table,
	.probe		= via_pci_probe,
	.remove		= __devexit_p(via_pci_remove),
};
  593. static int __init via_core_init(void)
  594. {
  595. int ret;
  596. ret = viafb_init();
  597. if (ret)
  598. return ret;
  599. viafb_i2c_init();
  600. viafb_gpio_init();
  601. return pci_register_driver(&via_driver);
  602. }
/* Module exit: exact reverse of via_core_init(). */
static void __exit via_core_exit(void)
{
	pci_unregister_driver(&via_driver);
	viafb_gpio_exit();
	viafb_i2c_exit();
	viafb_exit();
}

module_init(via_core_init);
module_exit(via_core_exit);