/* via-core.c */
  1. /*
  2. * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
  3. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
  4. * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
  5. */
  6. /*
  7. * Core code for the Via multifunction framebuffer device.
  8. */
  9. #include <linux/via-core.h>
  10. #include <linux/via_i2c.h>
  11. #include <linux/via-gpio.h>
  12. #include "global.h"
  13. #include <linux/module.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/list.h>
  17. #include <linux/pm.h>
  18. #include <asm/olpc.h>
  19. /*
  20. * The default port config.
  21. */
/*
 * Default I2C/GPIO port configuration: one entry per VIA port,
 * giving the port type, access mode, register set and port index.
 * Not const: the i2c/gpio subdrivers receive a pointer to this
 * table via global_dev.port_cfg.
 */
static struct via_port_cfg adap_configs[] = {
	[VIA_PORT_26] = { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x26 },
	[VIA_PORT_31] = { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x31 },
	[VIA_PORT_25] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
	[VIA_PORT_2C] = { VIA_PORT_GPIO, VIA_MODE_I2C,  VIASR, 0x2c },
	[VIA_PORT_3D] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
	{ 0, 0, 0, 0 }	/* terminator */
};
  30. /*
  31. * The OLPC XO-1.5 puts the camera power and reset lines onto
  32. * GPIO 2C.
  33. */
/*
 * OLPC XO-1.5 variant of the port table: identical to adap_configs
 * except that port 2C is run in GPIO mode (it carries the camera
 * power and reset lines there, per the comment above).
 */
static const struct via_port_cfg olpc_adap_configs[] = {
	[VIA_PORT_26] = { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x26 },
	[VIA_PORT_31] = { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x31 },
	[VIA_PORT_25] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
	[VIA_PORT_2C] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x2c },
	[VIA_PORT_3D] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
	{ 0, 0, 0, 0 }	/* terminator */
};
  42. /*
  43. * We currently only support one viafb device (will there ever be
  44. * more than one?), so just declare it globally here.
  45. */
static struct viafb_dev global_dev;	/* state for the single supported device */
  47. /*
  48. * Basic register access; spinlock required.
  49. */
  50. static inline void viafb_mmio_write(int reg, u32 v)
  51. {
  52. iowrite32(v, global_dev.engine_mmio + reg);
  53. }
  54. static inline int viafb_mmio_read(int reg)
  55. {
  56. return ioread32(global_dev.engine_mmio + reg);
  57. }
  58. /* ---------------------------------------------------------------------- */
  59. /*
  60. * Interrupt management. We have a single IRQ line for a lot of
  61. * different functions, so we need to share it. The design here
  62. * is that we don't want to reimplement the shared IRQ code here;
  63. * we also want to avoid having contention for a single handler thread.
  64. * So each subdev driver which needs interrupts just requests
  65. * them directly from the kernel. We just have what's needed for
  66. * overall access to the interrupt control register.
  67. */
  68. /*
  69. * Which interrupts are enabled now?
  70. */
/* Mask of interrupt sources currently enabled in VDE_INTERRUPT. */
static u32 viafb_enabled_ints;

/* Reset interrupt state: nothing enabled, hardware register cleared. */
static void __devinit viafb_int_init(void)
{
	viafb_enabled_ints = 0;
	viafb_mmio_write(VDE_INTERRUPT, 0);
}
  77. /*
  78. * Allow subdevs to ask for specific interrupts to be enabled. These
  79. * functions must be called with reg_lock held
  80. */
/*
 * Enable the interrupt sources given in @mask, turning on the master
 * enable bit as well.  Per the comment above, the caller must hold
 * reg_lock.
 */
void viafb_irq_enable(u32 mask)
{
	viafb_enabled_ints |= mask;
	viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE);
}
EXPORT_SYMBOL_GPL(viafb_irq_enable);
  87. void viafb_irq_disable(u32 mask)
  88. {
  89. viafb_enabled_ints &= ~mask;
  90. if (viafb_enabled_ints == 0)
  91. viafb_mmio_write(VDE_INTERRUPT, 0); /* Disable entirely */
  92. else
  93. viafb_mmio_write(VDE_INTERRUPT,
  94. viafb_enabled_ints | VDE_I_ENABLE);
  95. }
  96. EXPORT_SYMBOL_GPL(viafb_irq_disable);
  97. /* ---------------------------------------------------------------------- */
  98. /*
  99. * Access to the DMA engine. This currently provides what the camera
  100. * driver needs (i.e. outgoing only) but is easily expandable if need
  101. * be.
  102. */
  103. /*
  104. * There are four DMA channels in the vx855. For now, we only
  105. * use one of them, though. Most of the time, the DMA channel
  106. * will be idle, so we keep the IRQ handler unregistered except
  107. * when some subsystem has indicated an interest.
  108. */
/* Count of subsystems that have requested DMA service. */
static int viafb_dma_users;
/* Completed by the IRQ handler when a DMA transfer finishes. */
static DECLARE_COMPLETION(viafb_dma_completion);
/*
 * This mutex protects viafb_dma_users and our global interrupt
 * registration state; it also serializes access to the DMA
 * engine.
 */
static DEFINE_MUTEX(viafb_dma_lock);
  117. /*
  118. * The VX855 DMA descriptor (used for s/g transfers) looks
  119. * like this.
  120. */
/*
 * One 64-byte VX855 scatter/gather DMA descriptor.  The field layout
 * matches what the hardware expects — do not reorder or resize.
 */
struct viafb_vx855_dma_descr {
	u32	addr_low;	/* Low part of phys addr */
	u32	addr_high;	/* High 12 bits of addr */
	u32	fb_offset;	/* Offset into FB memory */
	u32	seg_size;	/* Size, 16-byte units */
	u32	tile_mode;	/* "tile mode" setting */
	u32	next_desc_low;	/* Next descriptor addr */
	u32	next_desc_high;
	u32	pad;		/* Fill out to 64 bytes */
};
  131. /*
  132. * Flags added to the "next descriptor low" pointers
  133. */
/* Flags OR'd into the "next descriptor low" pointers. */
#define VIAFB_DMA_MAGIC		0x01	/* ??? Just has to be there */
#define VIAFB_DMA_FINAL_SEGMENT	0x02	/* Final segment */
  136. /*
  137. * The completion IRQ handler.
  138. */
  139. static irqreturn_t viafb_dma_irq(int irq, void *data)
  140. {
  141. int csr;
  142. irqreturn_t ret = IRQ_NONE;
  143. spin_lock(&global_dev.reg_lock);
  144. csr = viafb_mmio_read(VDMA_CSR0);
  145. if (csr & VDMA_C_DONE) {
  146. viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
  147. complete(&viafb_dma_completion);
  148. ret = IRQ_HANDLED;
  149. }
  150. spin_unlock(&global_dev.reg_lock);
  151. return ret;
  152. }
  153. /*
  154. * Indicate a need for DMA functionality.
  155. */
  156. int viafb_request_dma(void)
  157. {
  158. int ret = 0;
  159. /*
  160. * Only VX855 is supported currently.
  161. */
  162. if (global_dev.chip_type != UNICHROME_VX855)
  163. return -ENODEV;
  164. /*
  165. * Note the new user and set up our interrupt handler
  166. * if need be.
  167. */
  168. mutex_lock(&viafb_dma_lock);
  169. viafb_dma_users++;
  170. if (viafb_dma_users == 1) {
  171. ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
  172. IRQF_SHARED, "via-dma", &viafb_dma_users);
  173. if (ret)
  174. viafb_dma_users--;
  175. else
  176. viafb_irq_enable(VDE_I_DMA0TDEN);
  177. }
  178. mutex_unlock(&viafb_dma_lock);
  179. return ret;
  180. }
  181. EXPORT_SYMBOL_GPL(viafb_request_dma);
  182. void viafb_release_dma(void)
  183. {
  184. mutex_lock(&viafb_dma_lock);
  185. viafb_dma_users--;
  186. if (viafb_dma_users == 0) {
  187. viafb_irq_disable(VDE_I_DMA0TDEN);
  188. free_irq(global_dev.pdev->irq, &viafb_dma_users);
  189. }
  190. mutex_unlock(&viafb_dma_lock);
  191. }
  192. EXPORT_SYMBOL_GPL(viafb_release_dma);
#if 0
/*
 * Copy a single buffer from FB memory, synchronously.  This code works
 * but is not currently used (the scatter/gather variant below is what
 * the camera driver calls); kept under #if 0 for reference.
 */
void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len)
{
	unsigned long flags;
	int csr;

	mutex_lock(&viafb_dma_lock);
	init_completion(&viafb_dma_completion);
	/*
	 * Program the controller.
	 */
	spin_lock_irqsave(&global_dev.reg_lock, flags);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
	/* Enable ints; must happen after CSR0 write! */
	viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE);
	viafb_mmio_write(VDMA_MARL0, (int) (paddr & 0xfffffff0));
	viafb_mmio_write(VDMA_MARH0, (int) ((paddr >> 28) & 0xfff));
	/* Data sheet suggests DAR0 should be <<4, but it lies */
	viafb_mmio_write(VDMA_DAR0, offset);
	viafb_mmio_write(VDMA_DQWCR0, len >> 4);
	viafb_mmio_write(VDMA_TMR0, 0);
	viafb_mmio_write(VDMA_DPRL0, 0);
	viafb_mmio_write(VDMA_DPRH0, 0);
	viafb_mmio_write(VDMA_PMR0, 0);
	csr = viafb_mmio_read(VDMA_CSR0);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
	/*
	 * Now we just wait until the interrupt handler says
	 * we're done.
	 */
	wait_for_completion_interruptible(&viafb_dma_completion);
	viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
	mutex_unlock(&viafb_dma_lock);
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out);
#endif
  233. /*
  234. * Do a scatter/gather DMA copy from FB memory. You must have done
  235. * a successful call to viafb_request_dma() first.
  236. */
  237. int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
  238. {
  239. struct viafb_vx855_dma_descr *descr;
  240. void *descrpages;
  241. dma_addr_t descr_handle;
  242. unsigned long flags;
  243. int i;
  244. struct scatterlist *sgentry;
  245. dma_addr_t nextdesc;
  246. /*
  247. * Get a place to put the descriptors.
  248. */
  249. descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
  250. nsg*sizeof(struct viafb_vx855_dma_descr),
  251. &descr_handle, GFP_KERNEL);
  252. if (descrpages == NULL) {
  253. dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
  254. return -ENOMEM;
  255. }
  256. mutex_lock(&viafb_dma_lock);
  257. /*
  258. * Fill them in.
  259. */
  260. descr = descrpages;
  261. nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
  262. for_each_sg(sg, sgentry, nsg, i) {
  263. dma_addr_t paddr = sg_dma_address(sgentry);
  264. descr->addr_low = paddr & 0xfffffff0;
  265. descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
  266. descr->fb_offset = offset;
  267. descr->seg_size = sg_dma_len(sgentry) >> 4;
  268. descr->tile_mode = 0;
  269. descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
  270. descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
  271. descr->pad = 0xffffffff; /* VIA driver does this */
  272. offset += sg_dma_len(sgentry);
  273. nextdesc += sizeof(struct viafb_vx855_dma_descr);
  274. descr++;
  275. }
  276. descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC;
  277. /*
  278. * Program the engine.
  279. */
  280. spin_lock_irqsave(&global_dev.reg_lock, flags);
  281. init_completion(&viafb_dma_completion);
  282. viafb_mmio_write(VDMA_DQWCR0, 0);
  283. viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
  284. viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
  285. viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
  286. viafb_mmio_write(VDMA_DPRH0,
  287. (((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
  288. (void) viafb_mmio_read(VDMA_CSR0);
  289. viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
  290. spin_unlock_irqrestore(&global_dev.reg_lock, flags);
  291. /*
  292. * Now we just wait until the interrupt handler says
  293. * we're done. Except that, actually, we need to wait a little
  294. * longer: the interrupts seem to jump the gun a little and we
  295. * get corrupted frames sometimes.
  296. */
  297. wait_for_completion_timeout(&viafb_dma_completion, 1);
  298. msleep(1);
  299. if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
  300. printk(KERN_ERR "VIA DMA timeout!\n");
  301. /*
  302. * Clean up and we're done.
  303. */
  304. viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
  305. viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
  306. mutex_unlock(&viafb_dma_lock);
  307. dma_free_coherent(&global_dev.pdev->dev,
  308. nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
  309. descr_handle);
  310. return 0;
  311. }
  312. EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
  313. /* ---------------------------------------------------------------------- */
  314. /*
  315. * Figure out how big our framebuffer memory is. Kind of ugly,
  316. * but evidently we can't trust the information found in the
  317. * fbdev configuration area.
  318. */
/* PCI device IDs of the "function 3" device for each supported chipset. */
static u16 via_function3[] = {
	CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
	CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
	P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3, VX900_FUNCTION3,
};
  324. /* Get the BIOS-configured framebuffer size from PCI configuration space
  325. * of function 3 in the respective chipset */
/*
 * Get the BIOS-configured framebuffer size from PCI configuration space
 * of function 3 in the respective chipset.
 *
 * Returns the size in bytes, or -EIO if no known function-3 device is
 * present.  The size-encoding of bits 12-14 differs between pre- and
 * post-CX700 chips, hence the two switch tables below.
 */
static int viafb_get_fb_size_from_pci(int chip_type)
{
	int i;
	u8 offset = 0;
	u32 FBSize;
	u32 VideoMemSize;

	/* search for the "FUNCTION3" device in this chipset */
	for (i = 0; i < ARRAY_SIZE(via_function3); i++) {
		struct pci_dev *pdev;

		pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i],
				      NULL);
		if (!pdev)
			continue;
		DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device);
		/* Which config register holds the size depends on the chip */
		switch (pdev->device) {
		case CLE266_FUNCTION3:
		case KM400_FUNCTION3:
			offset = 0xE0;
			break;
		case CN400_FUNCTION3:
		case CN700_FUNCTION3:
		case CX700_FUNCTION3:
		case KM800_FUNCTION3:
		case KM890_FUNCTION3:
		case P4M890_FUNCTION3:
		case P4M900_FUNCTION3:
		case VX800_FUNCTION3:
		case VX855_FUNCTION3:
		case VX900_FUNCTION3:
		/*case CN750_FUNCTION3: */
			offset = 0xA0;
			break;
		}
		if (!offset)
			break;
		pci_read_config_dword(pdev, offset, &FBSize);
		pci_dev_put(pdev);
	}
	if (!offset) {
		printk(KERN_ERR "cannot determine framebuffer size\n");
		return -EIO;
	}
	/* Only bits 12-14 encode the size */
	FBSize = FBSize & 0x00007000;
	DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);
	if (chip_type < UNICHROME_CX700) {
		switch (FBSize) {
		case 0x00004000:
			VideoMemSize = (16 << 20);	/*16M */
			break;
		case 0x00005000:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		case 0x00006000:
			VideoMemSize = (64 << 20);	/*64M */
			break;
		default:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		}
	} else {
		switch (FBSize) {
		case 0x00001000:
			VideoMemSize = (8 << 20);	/*8M */
			break;
		case 0x00002000:
			VideoMemSize = (16 << 20);	/*16M */
			break;
		case 0x00003000:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		case 0x00004000:
			VideoMemSize = (64 << 20);	/*64M */
			break;
		case 0x00005000:
			VideoMemSize = (128 << 20);	/*128M */
			break;
		case 0x00006000:
			VideoMemSize = (256 << 20);	/*256M */
			break;
		case 0x00007000:	/* Only on VX855/875 */
			VideoMemSize = (512 << 20);	/*512M */
			break;
		default:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		}
	}
	return VideoMemSize;
}
  415. /*
  416. * Figure out and map our MMIO regions.
  417. */
/*
 * Figure out and map our MMIO regions: the engine register space
 * (BAR 1) and the framebuffer memory itself.  Returns 0 on success
 * or a negative errno; on failure nothing stays mapped.
 */
static int __devinit via_pci_setup_mmio(struct viafb_dev *vdev)
{
	int ret;
	/*
	 * Hook up to the device registers.  Note that we soldier
	 * on if it fails; the framebuffer can operate (without
	 * acceleration) without this region.
	 */
	vdev->engine_start = pci_resource_start(vdev->pdev, 1);
	vdev->engine_len = pci_resource_len(vdev->pdev, 1);
	vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
					    vdev->engine_len);
	if (vdev->engine_mmio == NULL)
		dev_err(&vdev->pdev->dev,
			"Unable to map engine MMIO; operation will be "
			"slow and crippled.\n");
	/*
	 * Map in framebuffer memory.  For now, failure here is
	 * fatal.  Unfortunately, in the absence of significant
	 * vmalloc space, failure here is also entirely plausible.
	 * Eventually we want to move away from mapping this
	 * entire region.
	 */
	if (vdev->chip_type == UNICHROME_VX900)
		vdev->fbmem_start = pci_resource_start(vdev->pdev, 2);
	else
		vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
	/* Negative values from the size probe propagate out as errors */
	ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
	if (ret < 0)
		goto out_unmap;
	vdev->fbmem = ioremap_nocache(vdev->fbmem_start, vdev->fbmem_len);
	if (vdev->fbmem == NULL) {
		ret = -ENOMEM;
		goto out_unmap;
	}
	return 0;
out_unmap:
	iounmap(vdev->engine_mmio);
	return ret;
}
/* Undo via_pci_setup_mmio(): unmap both MMIO regions. */
static void via_pci_teardown_mmio(struct viafb_dev *vdev)
{
	iounmap(vdev->fbmem);
	iounmap(vdev->engine_mmio);
}
  463. /*
  464. * Create our subsidiary devices.
  465. */
/*
 * Create our subsidiary devices.  Each entry pairs a platform-device
 * name with the device created for it (NULL until/unless creation
 * succeeds).
 */
static struct viafb_subdev_info {
	char *name;
	struct platform_device *platdev;
} viafb_subdevs[] = {
	{
		.name = "viafb-gpio",
	},
	{
		.name = "viafb-i2c",
	}
};
#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)
  478. static int __devinit via_create_subdev(struct viafb_dev *vdev,
  479. struct viafb_subdev_info *info)
  480. {
  481. int ret;
  482. info->platdev = platform_device_alloc(info->name, -1);
  483. if (!info->platdev) {
  484. dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n",
  485. info->name);
  486. return -ENOMEM;
  487. }
  488. info->platdev->dev.parent = &vdev->pdev->dev;
  489. info->platdev->dev.platform_data = vdev;
  490. ret = platform_device_add(info->platdev);
  491. if (ret) {
  492. dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n",
  493. info->name);
  494. platform_device_put(info->platdev);
  495. info->platdev = NULL;
  496. }
  497. return ret;
  498. }
  499. static int __devinit via_setup_subdevs(struct viafb_dev *vdev)
  500. {
  501. int i;
  502. /*
  503. * Ignore return values. Even if some of the devices
  504. * fail to be created, we'll still be able to use some
  505. * of the rest.
  506. */
  507. for (i = 0; i < N_SUBDEVS; i++)
  508. via_create_subdev(vdev, viafb_subdevs + i);
  509. return 0;
  510. }
  511. static void via_teardown_subdevs(void)
  512. {
  513. int i;
  514. for (i = 0; i < N_SUBDEVS; i++)
  515. if (viafb_subdevs[i].platdev) {
  516. viafb_subdevs[i].platdev->dev.platform_data = NULL;
  517. platform_device_unregister(viafb_subdevs[i].platdev);
  518. }
  519. }
  520. /*
  521. * Power management functions
  522. */
  523. #ifdef CONFIG_PM
/* Registered suspend/resume hooks, protected by the mutex below. */
static LIST_HEAD(viafb_pm_hooks);
static DEFINE_MUTEX(viafb_pm_hooks_lock);
/*
 * Register a set of PM hooks to be called at suspend/resume time.
 * Hooks are invoked in reverse registration order on suspend and in
 * registration order on resume (see via_suspend()/via_resume()).
 */
void viafb_pm_register(struct viafb_pm_hooks *hooks)
{
	INIT_LIST_HEAD(&hooks->list);

	mutex_lock(&viafb_pm_hooks_lock);
	list_add_tail(&hooks->list, &viafb_pm_hooks);
	mutex_unlock(&viafb_pm_hooks_lock);
}
EXPORT_SYMBOL_GPL(viafb_pm_register);
/* Remove a previously registered set of PM hooks. */
void viafb_pm_unregister(struct viafb_pm_hooks *hooks)
{
	mutex_lock(&viafb_pm_hooks_lock);
	list_del(&hooks->list);
	mutex_unlock(&viafb_pm_hooks_lock);
}
EXPORT_SYMBOL_GPL(viafb_pm_unregister);
/*
 * Suspend handler: run each registered subdev hook (newest first),
 * then power the PCI device down.  Always returns 0; suspend errors
 * are deliberately ignored, per the quote below.
 */
static int via_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct viafb_pm_hooks *hooks;

	/* Only handle real suspend events (not e.g. hibernate freeze) */
	if (state.event != PM_EVENT_SUSPEND)
		return 0;
	/*
	 * "I've occasionally hit a few drivers that caused suspend
	 * failures, and each and every time it was a driver bug, and
	 * the right thing to do was to just ignore the error and suspend
	 * anyway - returning an error code and trying to undo the suspend
	 * is not what anybody ever really wants, even if our model
	 * _allows_ for it."
	 * -- Linus Torvalds, Dec. 7, 2009
	 */
	mutex_lock(&viafb_pm_hooks_lock);
	list_for_each_entry_reverse(hooks, &viafb_pm_hooks, list)
		hooks->suspend(hooks->private);
	mutex_unlock(&viafb_pm_hooks_lock);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
/*
 * Resume handler: power the PCI device back up, then run the subdev
 * resume hooks in registration order.
 */
static int via_resume(struct pci_dev *pdev)
{
	struct viafb_pm_hooks *hooks;

	/* Get the bus side powered up */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * NOTE(review): a pci_enable_device() failure is swallowed here
	 * (we still return 0, just skipping the subdev hooks) — confirm
	 * this best-effort behavior is intended.
	 */
	if (pci_enable_device(pdev))
		return 0;
	pci_set_master(pdev);

	/* Now bring back any subdevs */
	mutex_lock(&viafb_pm_hooks_lock);
	list_for_each_entry(hooks, &viafb_pm_hooks, list)
		hooks->resume(hooks->private);
	mutex_unlock(&viafb_pm_hooks_lock);
	return 0;
}
  580. #endif /* CONFIG_PM */
/*
 * Probe one VIA graphics chip: initialize the single global device
 * structure, map MMIO, set up interrupts and subdevices, and register
 * the framebuffer.  Returns 0 or a negative errno with everything
 * torn back down.
 */
static int __devinit via_pci_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	/*
	 * Global device initialization.
	 */
	memset(&global_dev, 0, sizeof(global_dev));
	global_dev.pdev = pdev;
	global_dev.chip_type = ent->driver_data;
	global_dev.port_cfg = adap_configs;
	/* The OLPC XO-1.5 needs its special port table (camera on 2C) */
	if (machine_is_olpc())
		global_dev.port_cfg = olpc_adap_configs;
	spin_lock_init(&global_dev.reg_lock);
	ret = via_pci_setup_mmio(&global_dev);
	if (ret)
		goto out_disable;
	/*
	 * Set up interrupts and create our subdevices.  Continue even if
	 * some things fail.
	 */
	viafb_int_init();
	via_setup_subdevs(&global_dev);
	/*
	 * Set up the framebuffer device
	 */
	ret = via_fb_pci_probe(&global_dev);
	if (ret)
		goto out_subdevs;
	return 0;

out_subdevs:
	via_teardown_subdevs();
	via_pci_teardown_mmio(&global_dev);
out_disable:
	pci_disable_device(pdev);
	return ret;
}
/* Undo via_pci_probe() in reverse order. */
static void __devexit via_pci_remove(struct pci_dev *pdev)
{
	via_teardown_subdevs();
	via_fb_pci_remove(pdev);
	via_pci_teardown_mmio(&global_dev);
	pci_disable_device(pdev);
}
/*
 * PCI IDs we bind to; driver_data carries the chip type consumed by
 * via_pci_probe().
 */
static struct pci_device_id via_pci_table[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
	  .driver_data = UNICHROME_CLE266 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
	  .driver_data = UNICHROME_K400 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
	  .driver_data = UNICHROME_K800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
	  .driver_data = UNICHROME_PM800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN700_DID),
	  .driver_data = UNICHROME_CN700 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
	  .driver_data = UNICHROME_CX700 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
	  .driver_data = UNICHROME_CN750 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
	  .driver_data = UNICHROME_K8M890 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
	  .driver_data = UNICHROME_P4M890 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
	  .driver_data = UNICHROME_P4M900 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
	  .driver_data = UNICHROME_VX800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
	  .driver_data = UNICHROME_VX855 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX900_DID),
	  .driver_data = UNICHROME_VX900 },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, via_pci_table);
/* The PCI driver itself; PM entry points only when CONFIG_PM is set. */
static struct pci_driver via_driver = {
	.name		= "viafb",
	.id_table	= via_pci_table,
	.probe		= via_pci_probe,
	.remove		= __devexit_p(via_pci_remove),
#ifdef CONFIG_PM
	.suspend	= via_suspend,
	.resume		= via_resume,
#endif
};
  668. static int __init via_core_init(void)
  669. {
  670. int ret;
  671. ret = viafb_init();
  672. if (ret)
  673. return ret;
  674. viafb_i2c_init();
  675. viafb_gpio_init();
  676. return pci_register_driver(&via_driver);
  677. }
/* Module teardown, in reverse order of via_core_init(). */
static void __exit via_core_exit(void)
{
	pci_unregister_driver(&via_driver);
	viafb_gpio_exit();
	viafb_i2c_exit();
	viafb_exit();
}
/* Module entry/exit points. */
module_init(via_core_init);
module_exit(via_core_exit);