/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "msm_drv.h"
#include "msm_gpu.h"

#include <mach/iommu.h>
  20. static void msm_fb_output_poll_changed(struct drm_device *dev)
  21. {
  22. struct msm_drm_private *priv = dev->dev_private;
  23. if (priv->fbdev)
  24. drm_fb_helper_hotplug_event(priv->fbdev);
  25. }
/* KMS mode-config hooks: framebuffer creation + output hotplug fan-out. */
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
};
/*
 * IOMMU fault callback: just logs the faulting iova/flags.  Returning 0
 * reports the fault as handled so the core does not print its own message.
 */
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}
  36. int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
  37. {
  38. struct msm_drm_private *priv = dev->dev_private;
  39. int idx = priv->num_iommus++;
  40. if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
  41. return -EINVAL;
  42. priv->iommus[idx] = iommu;
  43. iommu_set_fault_handler(iommu, msm_fault_handler, dev);
  44. /* need to iommu_attach_device() somewhere?? on resume?? */
  45. return idx;
  46. }
  47. int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
  48. const char **names, int cnt)
  49. {
  50. int i, ret;
  51. for (i = 0; i < cnt; i++) {
  52. struct device *ctx = msm_iommu_get_ctx(names[i]);
  53. if (!ctx)
  54. continue;
  55. ret = iommu_attach_device(iommu, ctx);
  56. if (ret) {
  57. dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
  58. return ret;
  59. }
  60. }
  61. return 0;
  62. }
  63. #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
  64. static bool reglog = false;
  65. MODULE_PARM_DESC(reglog, "Enable register read/write logging");
  66. module_param(reglog, bool, 0600);
  67. #else
  68. #define reglog 0
  69. #endif
  70. void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
  71. const char *dbgname)
  72. {
  73. struct resource *res;
  74. unsigned long size;
  75. void __iomem *ptr;
  76. if (name)
  77. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
  78. else
  79. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  80. if (!res) {
  81. dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
  82. return ERR_PTR(-EINVAL);
  83. }
  84. size = resource_size(res);
  85. ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
  86. if (!ptr) {
  87. dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
  88. return ERR_PTR(-ENOMEM);
  89. }
  90. if (reglog)
  91. printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
  92. return ptr;
  93. }
  94. void msm_writel(u32 data, void __iomem *addr)
  95. {
  96. if (reglog)
  97. printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
  98. writel(data, addr);
  99. }
  100. u32 msm_readl(const void __iomem *addr)
  101. {
  102. u32 val = readl(addr);
  103. if (reglog)
  104. printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
  105. return val;
  106. }
/*
 * DRM operations:
 */
/*
 * Driver unload: tear down in roughly the reverse order of msm_load().
 * Ordering here matters, so the sequence is kept exactly as-is.
 */
static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	/* Stop hotplug polling and destroy mode objects before the irq
	 * goes away, so no poll/vblank work races the teardown. */
	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	/* Device must be powered while the irq handler is uninstalled. */
	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	/* Drain any queued work before destroying the workqueue.
	 * NOTE(review): priv->wq is not NULL-checked here -- assumes
	 * msm_load() succeeded in allocating it. */
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		/* gpu teardown is serialized under struct_mutex. */
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		gpu->funcs->destroy(gpu);
		mutex_unlock(&dev->struct_mutex);
	}

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}
  137. static int msm_load(struct drm_device *dev, unsigned long flags)
  138. {
  139. struct platform_device *pdev = dev->platformdev;
  140. struct msm_drm_private *priv;
  141. struct msm_kms *kms;
  142. int ret;
  143. priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  144. if (!priv) {
  145. dev_err(dev->dev, "failed to allocate private data\n");
  146. return -ENOMEM;
  147. }
  148. dev->dev_private = priv;
  149. priv->wq = alloc_ordered_workqueue("msm", 0);
  150. init_waitqueue_head(&priv->fence_event);
  151. INIT_LIST_HEAD(&priv->inactive_list);
  152. drm_mode_config_init(dev);
  153. kms = mdp4_kms_init(dev);
  154. if (IS_ERR(kms)) {
  155. /*
  156. * NOTE: once we have GPU support, having no kms should not
  157. * be considered fatal.. ideally we would still support gpu
  158. * and (for example) use dmabuf/prime to share buffers with
  159. * imx drm driver on iMX5
  160. */
  161. dev_err(dev->dev, "failed to load kms\n");
  162. ret = PTR_ERR(priv->kms);
  163. goto fail;
  164. }
  165. priv->kms = kms;
  166. if (kms) {
  167. pm_runtime_enable(dev->dev);
  168. ret = kms->funcs->hw_init(kms);
  169. if (ret) {
  170. dev_err(dev->dev, "kms hw init failed: %d\n", ret);
  171. goto fail;
  172. }
  173. }
  174. dev->mode_config.min_width = 0;
  175. dev->mode_config.min_height = 0;
  176. dev->mode_config.max_width = 2048;
  177. dev->mode_config.max_height = 2048;
  178. dev->mode_config.funcs = &mode_config_funcs;
  179. ret = drm_vblank_init(dev, 1);
  180. if (ret < 0) {
  181. dev_err(dev->dev, "failed to initialize vblank\n");
  182. goto fail;
  183. }
  184. pm_runtime_get_sync(dev->dev);
  185. ret = drm_irq_install(dev);
  186. pm_runtime_put_sync(dev->dev);
  187. if (ret < 0) {
  188. dev_err(dev->dev, "failed to install IRQ handler\n");
  189. goto fail;
  190. }
  191. platform_set_drvdata(pdev, dev);
  192. #ifdef CONFIG_DRM_MSM_FBDEV
  193. priv->fbdev = msm_fbdev_init(dev);
  194. #endif
  195. drm_kms_helper_poll_init(dev);
  196. return 0;
  197. fail:
  198. msm_unload(dev);
  199. return ret;
  200. }
/*
 * Lazily bring up the a3xx gpu.  Failure is not fatal: the driver keeps
 * running display-only with priv->gpu == NULL.
 */
static void load_gpu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu;

	/* Already loaded?  (Unlocked fast-path check; the init below is
	 * what actually runs under struct_mutex.) */
	if (priv->gpu)
		return;

	mutex_lock(&dev->struct_mutex);
	gpu = a3xx_gpu_init(dev);
	if (IS_ERR(gpu)) {
		dev_warn(dev->dev, "failed to load a3xx gpu\n");
		gpu = NULL;
		/* not fatal */
	}
	mutex_unlock(&dev->struct_mutex);

	if (gpu) {
		int ret;
		/* Power up, then hw-init; on failure drop back to the
		 * no-gpu case instead of failing the caller. */
		gpu->funcs->pm_resume(gpu);
		ret = gpu->funcs->hw_init(gpu);
		if (ret) {
			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
			gpu->funcs->destroy(gpu);
			gpu = NULL;
		}
	}

	priv->gpu = gpu;
}
  227. static int msm_open(struct drm_device *dev, struct drm_file *file)
  228. {
  229. struct msm_file_private *ctx;
  230. /* For now, load gpu on open.. to avoid the requirement of having
  231. * firmware in the initrd.
  232. */
  233. load_gpu(dev);
  234. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  235. if (!ctx)
  236. return -ENOMEM;
  237. file->driver_priv = ctx;
  238. return 0;
  239. }
  240. static void msm_preclose(struct drm_device *dev, struct drm_file *file)
  241. {
  242. struct msm_drm_private *priv = dev->dev_private;
  243. struct msm_file_private *ctx = file->driver_priv;
  244. struct msm_kms *kms = priv->kms;
  245. if (kms)
  246. kms->funcs->preclose(kms, file);
  247. mutex_lock(&dev->struct_mutex);
  248. if (ctx == priv->lastctx)
  249. priv->lastctx = NULL;
  250. mutex_unlock(&dev->struct_mutex);
  251. kfree(ctx);
  252. }
  253. static void msm_lastclose(struct drm_device *dev)
  254. {
  255. struct msm_drm_private *priv = dev->dev_private;
  256. if (priv->fbdev) {
  257. drm_modeset_lock_all(dev);
  258. drm_fb_helper_restore_fbdev_mode(priv->fbdev);
  259. drm_modeset_unlock_all(dev);
  260. }
  261. }
/*
 * Toplevel irq handler, delegated entirely to the kms backend.
 * NOTE(review): DRM_IRQ_ARGS presumably expands to (int irq, void *arg)
 * with arg being the drm_device passed to drm_irq_install() -- confirm
 * against the drmP.h of this kernel version.
 */
static irqreturn_t msm_irq(DRM_IRQ_ARGS)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	/* irq is only ever installed after kms init succeeded (msm_load). */
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}
  270. static void msm_irq_preinstall(struct drm_device *dev)
  271. {
  272. struct msm_drm_private *priv = dev->dev_private;
  273. struct msm_kms *kms = priv->kms;
  274. BUG_ON(!kms);
  275. kms->funcs->irq_preinstall(kms);
  276. }
  277. static int msm_irq_postinstall(struct drm_device *dev)
  278. {
  279. struct msm_drm_private *priv = dev->dev_private;
  280. struct msm_kms *kms = priv->kms;
  281. BUG_ON(!kms);
  282. return kms->funcs->irq_postinstall(kms);
  283. }
  284. static void msm_irq_uninstall(struct drm_device *dev)
  285. {
  286. struct msm_drm_private *priv = dev->dev_private;
  287. struct msm_kms *kms = priv->kms;
  288. BUG_ON(!kms);
  289. kms->funcs->irq_uninstall(kms);
  290. }
  291. static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
  292. {
  293. struct msm_drm_private *priv = dev->dev_private;
  294. struct msm_kms *kms = priv->kms;
  295. if (!kms)
  296. return -ENXIO;
  297. DBG("dev=%p, crtc=%d", dev, crtc_id);
  298. return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
  299. }
  300. static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
  301. {
  302. struct msm_drm_private *priv = dev->dev_private;
  303. struct msm_kms *kms = priv->kms;
  304. if (!kms)
  305. return;
  306. DBG("dev=%p, crtc=%d", dev, crtc_id);
  307. kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
  308. }
/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
  313. static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
  314. {
  315. struct msm_drm_private *priv = dev->dev_private;
  316. struct msm_gpu *gpu = priv->gpu;
  317. if (gpu) {
  318. seq_printf(m, "%s Status:\n", gpu->name);
  319. gpu->funcs->show(gpu, m);
  320. }
  321. return 0;
  322. }
  323. static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
  324. {
  325. struct msm_drm_private *priv = dev->dev_private;
  326. struct msm_gpu *gpu = priv->gpu;
  327. if (gpu) {
  328. seq_printf(m, "Active Objects (%s):\n", gpu->name);
  329. msm_gem_describe_objects(&gpu->active_list, m);
  330. }
  331. seq_printf(m, "Inactive Objects:\n");
  332. msm_gem_describe_objects(&priv->inactive_list, m);
  333. return 0;
  334. }
/* Dump the drm_mm allocator state for this device. */
static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
	return drm_mm_dump_table(m, dev->mm_private);
}
  339. static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
  340. {
  341. struct msm_drm_private *priv = dev->dev_private;
  342. struct drm_framebuffer *fb, *fbdev_fb = NULL;
  343. if (priv->fbdev) {
  344. seq_printf(m, "fbcon ");
  345. fbdev_fb = priv->fbdev->fb;
  346. msm_framebuffer_describe(fbdev_fb, m);
  347. }
  348. mutex_lock(&dev->mode_config.fb_lock);
  349. list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
  350. if (fb == fbdev_fb)
  351. continue;
  352. seq_printf(m, "user ");
  353. msm_framebuffer_describe(fb, m);
  354. }
  355. mutex_unlock(&dev->mode_config.fb_lock);
  356. return 0;
  357. }
  358. static int show_locked(struct seq_file *m, void *arg)
  359. {
  360. struct drm_info_node *node = (struct drm_info_node *) m->private;
  361. struct drm_device *dev = node->minor->dev;
  362. int (*show)(struct drm_device *dev, struct seq_file *m) =
  363. node->info_ent->data;
  364. int ret;
  365. ret = mutex_lock_interruptible(&dev->struct_mutex);
  366. if (ret)
  367. return ret;
  368. ret = show(dev, m);
  369. mutex_unlock(&dev->struct_mutex);
  370. return ret;
  371. }
  372. static struct drm_info_list msm_debugfs_list[] = {
  373. {"gpu", show_locked, 0, msm_gpu_show},
  374. {"gem", show_locked, 0, msm_gem_show},
  375. { "mm", show_locked, 0, msm_mm_show },
  376. { "fb", show_locked, 0, msm_fb_show },
  377. };
  378. static int msm_debugfs_init(struct drm_minor *minor)
  379. {
  380. struct drm_device *dev = minor->dev;
  381. int ret;
  382. ret = drm_debugfs_create_files(msm_debugfs_list,
  383. ARRAY_SIZE(msm_debugfs_list),
  384. minor->debugfs_root, minor);
  385. if (ret) {
  386. dev_err(dev->dev, "could not install msm_debugfs_list\n");
  387. return ret;
  388. }
  389. return ret;
  390. }
/* Remove the files registered by msm_debugfs_init(). */
static void msm_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list), minor);
}
#endif

/*
 * Fences:
 */
/*
 * Sleep (interruptibly) until @fence has been completed or @timeout expires.
 * Returns 0 on completion, -ETIMEDOUT on timeout, -ERESTARTSYS on signal.
 */
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
		struct timespec *timeout)
{
	struct msm_drm_private *priv = dev->dev_private;
	unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
	unsigned long start_jiffies = jiffies;
	unsigned long remaining_jiffies;
	int ret;

	/* NOTE(review): timespec_to_jiffies() converts a duration, yet the
	 * result is compared against the absolute jiffies counter -- this
	 * only works if callers pass an absolute monotonic timestamp.
	 * Verify against the ioctl contract. */
	if (time_after(start_jiffies, timeout_jiffies))
		remaining_jiffies = 0;
	else
		remaining_jiffies = timeout_jiffies - start_jiffies;

	ret = wait_event_interruptible_timeout(priv->fence_event,
			priv->completed_fence >= fence,
			remaining_jiffies);
	if (ret == 0) {
		/* zero means the condition never became true: timed out */
		DBG("timeout waiting for fence: %u (completed: %u)",
				fence, priv->completed_fence);
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		/* positive remaining-time result: success */
		ret = 0;
	}

	return ret;
}
  424. /* call under struct_mutex */
  425. void msm_update_fence(struct drm_device *dev, uint32_t fence)
  426. {
  427. struct msm_drm_private *priv = dev->dev_private;
  428. if (fence > priv->completed_fence) {
  429. priv->completed_fence = fence;
  430. wake_up_all(&priv->fence_event);
  431. }
  432. }
/*
 * DRM ioctls:
 */
  436. static int msm_ioctl_get_param(struct drm_device *dev, void *data,
  437. struct drm_file *file)
  438. {
  439. struct msm_drm_private *priv = dev->dev_private;
  440. struct drm_msm_param *args = data;
  441. struct msm_gpu *gpu;
  442. /* for now, we just have 3d pipe.. eventually this would need to
  443. * be more clever to dispatch to appropriate gpu module:
  444. */
  445. if (args->pipe != MSM_PIPE_3D0)
  446. return -EINVAL;
  447. gpu = priv->gpu;
  448. if (!gpu)
  449. return -ENXIO;
  450. return gpu->funcs->get_param(gpu, args->param, &args->value);
  451. }
  452. static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
  453. struct drm_file *file)
  454. {
  455. struct drm_msm_gem_new *args = data;
  456. return msm_gem_new_handle(dev, file, args->size,
  457. args->flags, &args->handle);
  458. }
/* Build a kernel struct timespec from a uapi timespec-shaped value, as a
 * compound literal (so &TS(x) can be passed directly). */
#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
  460. static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
  461. struct drm_file *file)
  462. {
  463. struct drm_msm_gem_cpu_prep *args = data;
  464. struct drm_gem_object *obj;
  465. int ret;
  466. obj = drm_gem_object_lookup(dev, file, args->handle);
  467. if (!obj)
  468. return -ENOENT;
  469. ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));
  470. drm_gem_object_unreference_unlocked(obj);
  471. return ret;
  472. }
  473. static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
  474. struct drm_file *file)
  475. {
  476. struct drm_msm_gem_cpu_fini *args = data;
  477. struct drm_gem_object *obj;
  478. int ret;
  479. obj = drm_gem_object_lookup(dev, file, args->handle);
  480. if (!obj)
  481. return -ENOENT;
  482. ret = msm_gem_cpu_fini(obj);
  483. drm_gem_object_unreference_unlocked(obj);
  484. return ret;
  485. }
  486. static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
  487. struct drm_file *file)
  488. {
  489. struct drm_msm_gem_info *args = data;
  490. struct drm_gem_object *obj;
  491. int ret = 0;
  492. if (args->pad)
  493. return -EINVAL;
  494. obj = drm_gem_object_lookup(dev, file, args->handle);
  495. if (!obj)
  496. return -ENOENT;
  497. args->offset = msm_gem_mmap_offset(obj);
  498. drm_gem_object_unreference_unlocked(obj);
  499. return ret;
  500. }
  501. static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
  502. struct drm_file *file)
  503. {
  504. struct drm_msm_wait_fence *args = data;
  505. return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
  506. }
/* Driver-private ioctl table; all entries require auth and run unlocked. */
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH),
};
/* vm ops for mmap'd gem objects: demand-fault pages, drm refcount helpers. */
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
/* File ops: standard drm entry points, except mmap which goes to gem. */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};
/* Toplevel DRM driver description: modesetting + gem, core-managed irq. */
static struct drm_driver msm_driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
	.load = msm_load,
	.unload = msm_unload,
	.open = msm_open,
	.preclose = msm_preclose,
	.lastclose = msm_lastclose,
	/* irq dispatch is delegated to the kms backend (see msm_irq*): */
	.irq_handler = msm_irq,
	.irq_preinstall = msm_irq_preinstall,
	.irq_postinstall = msm_irq_postinstall,
	.irq_uninstall = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank = msm_enable_vblank,
	.disable_vblank = msm_disable_vblank,
	.gem_free_object = msm_gem_free_object,
	.gem_vm_ops = &vm_ops,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = msm_gem_dumb_map_offset,
	.dumb_destroy = msm_gem_dumb_destroy,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
	.debugfs_cleanup = msm_debugfs_cleanup,
#endif
	.ioctls = msm_ioctls,
	.num_ioctls = DRM_MSM_NUM_IOCTLS,
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.date = "20130625",
	.major = 1,
	.minor = 0,
};
#ifdef CONFIG_PM_SLEEP
  567. static int msm_pm_suspend(struct device *dev)
  568. {
  569. struct drm_device *ddev = dev_get_drvdata(dev);
  570. drm_kms_helper_poll_disable(ddev);
  571. return 0;
  572. }
  573. static int msm_pm_resume(struct device *dev)
  574. {
  575. struct drm_device *ddev = dev_get_drvdata(dev);
  576. drm_kms_helper_poll_enable(ddev);
  577. return 0;
  578. }
#endif
/* System-sleep callbacks only; no runtime-PM callbacks installed here. */
static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};
/*
 * Platform driver:
 */
/* Thin probe: real setup happens in msm_load(), invoked by the drm core. */
static int msm_pdev_probe(struct platform_device *pdev)
{
	return drm_platform_init(&msm_driver, pdev);
}
/* Thin remove: teardown happens in msm_unload(), via the drm core. */
static int msm_pdev_remove(struct platform_device *pdev)
{
	drm_platform_exit(&msm_driver, pdev);

	return 0;
}
/* Platform device ids this driver binds to.
 * NOTE(review): there is no MODULE_DEVICE_TABLE(platform, msm_id), so the
 * module will not autoload by modalias -- confirm whether that is intended. */
static const struct platform_device_id msm_id[] = {
	{ "mdp", 0 },
	{ }
};
/* Toplevel platform driver; binds by id table ("mdp"). */
static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove = msm_pdev_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "msm",
		.pm = &msm_pm_ops,
	},
	.id_table = msm_id,
};
/* Module init: register sub-drivers before the toplevel platform driver,
 * so they are available by the time msm_load() runs. */
static int __init msm_drm_register(void)
{
	DBG("init");
	hdmi_register();
	a3xx_register();
	return platform_driver_register(&msm_platform_driver);
}
/* Module exit: unregister in reverse order of msm_drm_register(). */
static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	hdmi_unregister();
	a3xx_unregister();
}
  623. module_init(msm_drm_register);
  624. module_exit(msm_drm_unregister);
  625. MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
  626. MODULE_DESCRIPTION("MSM DRM Driver");
  627. MODULE_LICENSE("GPL");