/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
  19. static void msm_fb_output_poll_changed(struct drm_device *dev)
  20. {
  21. struct msm_drm_private *priv = dev->dev_private;
  22. if (priv->fbdev)
  23. drm_fb_helper_hotplug_event(priv->fbdev);
  24. }
/* KMS mode-config hooks: framebuffer creation and hotplug notification. */
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
};
  29. static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
  30. unsigned long iova, int flags, void *arg)
  31. {
  32. DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
  33. return 0;
  34. }
  35. int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
  36. {
  37. struct msm_drm_private *priv = dev->dev_private;
  38. int idx = priv->num_iommus++;
  39. if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
  40. return -EINVAL;
  41. priv->iommus[idx] = iommu;
  42. iommu_set_fault_handler(iommu, msm_fault_handler, dev);
  43. /* need to iommu_attach_device() somewhere?? on resume?? */
  44. return idx;
  45. }
  46. int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
  47. const char **names, int cnt)
  48. {
  49. int i, ret;
  50. for (i = 0; i < cnt; i++) {
  51. /* TODO maybe some day msm iommu won't require this hack: */
  52. struct device *msm_iommu_get_ctx(const char *ctx_name);
  53. struct device *ctx = msm_iommu_get_ctx(names[i]);
  54. if (!ctx)
  55. continue;
  56. ret = iommu_attach_device(iommu, ctx);
  57. if (ret) {
  58. dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
  59. return ret;
  60. }
  61. }
  62. return 0;
  63. }
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
/* Runtime-togglable (0600) switch for tracing every register access. */
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
/* Compiled out: constant 0 lets the compiler elide the logging branches. */
#define reglog 0
#endif
  71. void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
  72. const char *dbgname)
  73. {
  74. struct resource *res;
  75. unsigned long size;
  76. void __iomem *ptr;
  77. if (name)
  78. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
  79. else
  80. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  81. if (!res) {
  82. dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
  83. return ERR_PTR(-EINVAL);
  84. }
  85. size = resource_size(res);
  86. ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
  87. if (!ptr) {
  88. dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
  89. return ERR_PTR(-ENOMEM);
  90. }
  91. if (reglog)
  92. printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
  93. return ptr;
  94. }
  95. void msm_writel(u32 data, void __iomem *addr)
  96. {
  97. if (reglog)
  98. printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
  99. writel(data, addr);
  100. }
  101. u32 msm_readl(const void __iomem *addr)
  102. {
  103. u32 val = readl(addr);
  104. if (reglog)
  105. printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
  106. return val;
  107. }
/*
 * DRM operations:
 */
  111. static int msm_unload(struct drm_device *dev)
  112. {
  113. struct msm_drm_private *priv = dev->dev_private;
  114. struct msm_kms *kms = priv->kms;
  115. struct msm_gpu *gpu = priv->gpu;
  116. drm_kms_helper_poll_fini(dev);
  117. drm_mode_config_cleanup(dev);
  118. drm_vblank_cleanup(dev);
  119. pm_runtime_get_sync(dev->dev);
  120. drm_irq_uninstall(dev);
  121. pm_runtime_put_sync(dev->dev);
  122. flush_workqueue(priv->wq);
  123. destroy_workqueue(priv->wq);
  124. if (kms) {
  125. pm_runtime_disable(dev->dev);
  126. kms->funcs->destroy(kms);
  127. }
  128. if (gpu) {
  129. mutex_lock(&dev->struct_mutex);
  130. gpu->funcs->pm_suspend(gpu);
  131. gpu->funcs->destroy(gpu);
  132. mutex_unlock(&dev->struct_mutex);
  133. }
  134. dev->dev_private = NULL;
  135. kfree(priv);
  136. return 0;
  137. }
  138. static int msm_load(struct drm_device *dev, unsigned long flags)
  139. {
  140. struct platform_device *pdev = dev->platformdev;
  141. struct msm_drm_private *priv;
  142. struct msm_kms *kms;
  143. int ret;
  144. priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  145. if (!priv) {
  146. dev_err(dev->dev, "failed to allocate private data\n");
  147. return -ENOMEM;
  148. }
  149. dev->dev_private = priv;
  150. priv->wq = alloc_ordered_workqueue("msm", 0);
  151. init_waitqueue_head(&priv->fence_event);
  152. INIT_LIST_HEAD(&priv->inactive_list);
  153. drm_mode_config_init(dev);
  154. kms = mdp4_kms_init(dev);
  155. if (IS_ERR(kms)) {
  156. /*
  157. * NOTE: once we have GPU support, having no kms should not
  158. * be considered fatal.. ideally we would still support gpu
  159. * and (for example) use dmabuf/prime to share buffers with
  160. * imx drm driver on iMX5
  161. */
  162. dev_err(dev->dev, "failed to load kms\n");
  163. ret = PTR_ERR(kms);
  164. goto fail;
  165. }
  166. priv->kms = kms;
  167. if (kms) {
  168. pm_runtime_enable(dev->dev);
  169. ret = kms->funcs->hw_init(kms);
  170. if (ret) {
  171. dev_err(dev->dev, "kms hw init failed: %d\n", ret);
  172. goto fail;
  173. }
  174. }
  175. dev->mode_config.min_width = 0;
  176. dev->mode_config.min_height = 0;
  177. dev->mode_config.max_width = 2048;
  178. dev->mode_config.max_height = 2048;
  179. dev->mode_config.funcs = &mode_config_funcs;
  180. ret = drm_vblank_init(dev, 1);
  181. if (ret < 0) {
  182. dev_err(dev->dev, "failed to initialize vblank\n");
  183. goto fail;
  184. }
  185. pm_runtime_get_sync(dev->dev);
  186. ret = drm_irq_install(dev);
  187. pm_runtime_put_sync(dev->dev);
  188. if (ret < 0) {
  189. dev_err(dev->dev, "failed to install IRQ handler\n");
  190. goto fail;
  191. }
  192. platform_set_drvdata(pdev, dev);
  193. #ifdef CONFIG_DRM_MSM_FBDEV
  194. priv->fbdev = msm_fbdev_init(dev);
  195. #endif
  196. drm_kms_helper_poll_init(dev);
  197. return 0;
  198. fail:
  199. msm_unload(dev);
  200. return ret;
  201. }
  202. static void load_gpu(struct drm_device *dev)
  203. {
  204. struct msm_drm_private *priv = dev->dev_private;
  205. struct msm_gpu *gpu;
  206. if (priv->gpu)
  207. return;
  208. mutex_lock(&dev->struct_mutex);
  209. gpu = a3xx_gpu_init(dev);
  210. if (IS_ERR(gpu)) {
  211. dev_warn(dev->dev, "failed to load a3xx gpu\n");
  212. gpu = NULL;
  213. /* not fatal */
  214. }
  215. mutex_unlock(&dev->struct_mutex);
  216. if (gpu) {
  217. int ret;
  218. gpu->funcs->pm_resume(gpu);
  219. ret = gpu->funcs->hw_init(gpu);
  220. if (ret) {
  221. dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
  222. gpu->funcs->destroy(gpu);
  223. gpu = NULL;
  224. }
  225. }
  226. priv->gpu = gpu;
  227. }
  228. static int msm_open(struct drm_device *dev, struct drm_file *file)
  229. {
  230. struct msm_file_private *ctx;
  231. /* For now, load gpu on open.. to avoid the requirement of having
  232. * firmware in the initrd.
  233. */
  234. load_gpu(dev);
  235. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  236. if (!ctx)
  237. return -ENOMEM;
  238. file->driver_priv = ctx;
  239. return 0;
  240. }
  241. static void msm_preclose(struct drm_device *dev, struct drm_file *file)
  242. {
  243. struct msm_drm_private *priv = dev->dev_private;
  244. struct msm_file_private *ctx = file->driver_priv;
  245. struct msm_kms *kms = priv->kms;
  246. if (kms)
  247. kms->funcs->preclose(kms, file);
  248. mutex_lock(&dev->struct_mutex);
  249. if (ctx == priv->lastctx)
  250. priv->lastctx = NULL;
  251. mutex_unlock(&dev->struct_mutex);
  252. kfree(ctx);
  253. }
  254. static void msm_lastclose(struct drm_device *dev)
  255. {
  256. struct msm_drm_private *priv = dev->dev_private;
  257. if (priv->fbdev) {
  258. drm_modeset_lock_all(dev);
  259. drm_fb_helper_restore_fbdev_mode(priv->fbdev);
  260. drm_modeset_unlock_all(dev);
  261. }
  262. }
  263. static irqreturn_t msm_irq(DRM_IRQ_ARGS)
  264. {
  265. struct drm_device *dev = arg;
  266. struct msm_drm_private *priv = dev->dev_private;
  267. struct msm_kms *kms = priv->kms;
  268. BUG_ON(!kms);
  269. return kms->funcs->irq(kms);
  270. }
  271. static void msm_irq_preinstall(struct drm_device *dev)
  272. {
  273. struct msm_drm_private *priv = dev->dev_private;
  274. struct msm_kms *kms = priv->kms;
  275. BUG_ON(!kms);
  276. kms->funcs->irq_preinstall(kms);
  277. }
  278. static int msm_irq_postinstall(struct drm_device *dev)
  279. {
  280. struct msm_drm_private *priv = dev->dev_private;
  281. struct msm_kms *kms = priv->kms;
  282. BUG_ON(!kms);
  283. return kms->funcs->irq_postinstall(kms);
  284. }
  285. static void msm_irq_uninstall(struct drm_device *dev)
  286. {
  287. struct msm_drm_private *priv = dev->dev_private;
  288. struct msm_kms *kms = priv->kms;
  289. BUG_ON(!kms);
  290. kms->funcs->irq_uninstall(kms);
  291. }
  292. static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
  293. {
  294. struct msm_drm_private *priv = dev->dev_private;
  295. struct msm_kms *kms = priv->kms;
  296. if (!kms)
  297. return -ENXIO;
  298. DBG("dev=%p, crtc=%d", dev, crtc_id);
  299. return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
  300. }
  301. static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
  302. {
  303. struct msm_drm_private *priv = dev->dev_private;
  304. struct msm_kms *kms = priv->kms;
  305. if (!kms)
  306. return;
  307. DBG("dev=%p, crtc=%d", dev, crtc_id);
  308. kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
  309. }
/*
 * DRM debugfs:
 */
#ifdef CONFIG_DEBUG_FS
  314. static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
  315. {
  316. struct msm_drm_private *priv = dev->dev_private;
  317. struct msm_gpu *gpu = priv->gpu;
  318. if (gpu) {
  319. seq_printf(m, "%s Status:\n", gpu->name);
  320. gpu->funcs->show(gpu, m);
  321. }
  322. return 0;
  323. }
  324. static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
  325. {
  326. struct msm_drm_private *priv = dev->dev_private;
  327. struct msm_gpu *gpu = priv->gpu;
  328. if (gpu) {
  329. seq_printf(m, "Active Objects (%s):\n", gpu->name);
  330. msm_gem_describe_objects(&gpu->active_list, m);
  331. }
  332. seq_printf(m, "Inactive Objects:\n");
  333. msm_gem_describe_objects(&priv->inactive_list, m);
  334. return 0;
  335. }
  336. static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
  337. {
  338. return drm_mm_dump_table(m, dev->mm_private);
  339. }
  340. static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
  341. {
  342. struct msm_drm_private *priv = dev->dev_private;
  343. struct drm_framebuffer *fb, *fbdev_fb = NULL;
  344. if (priv->fbdev) {
  345. seq_printf(m, "fbcon ");
  346. fbdev_fb = priv->fbdev->fb;
  347. msm_framebuffer_describe(fbdev_fb, m);
  348. }
  349. mutex_lock(&dev->mode_config.fb_lock);
  350. list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
  351. if (fb == fbdev_fb)
  352. continue;
  353. seq_printf(m, "user ");
  354. msm_framebuffer_describe(fb, m);
  355. }
  356. mutex_unlock(&dev->mode_config.fb_lock);
  357. return 0;
  358. }
  359. static int show_locked(struct seq_file *m, void *arg)
  360. {
  361. struct drm_info_node *node = (struct drm_info_node *) m->private;
  362. struct drm_device *dev = node->minor->dev;
  363. int (*show)(struct drm_device *dev, struct seq_file *m) =
  364. node->info_ent->data;
  365. int ret;
  366. ret = mutex_lock_interruptible(&dev->struct_mutex);
  367. if (ret)
  368. return ret;
  369. ret = show(dev, m);
  370. mutex_unlock(&dev->struct_mutex);
  371. return ret;
  372. }
/* debugfs entries; .data carries the show callback invoked by show_locked(). */
static struct drm_info_list msm_debugfs_list[] = {
		{"gpu", show_locked, 0, msm_gpu_show},
		{"gem", show_locked, 0, msm_gem_show},
		{ "mm", show_locked, 0, msm_mm_show },
		{ "fb", show_locked, 0, msm_fb_show },
};
  379. static int msm_debugfs_init(struct drm_minor *minor)
  380. {
  381. struct drm_device *dev = minor->dev;
  382. int ret;
  383. ret = drm_debugfs_create_files(msm_debugfs_list,
  384. ARRAY_SIZE(msm_debugfs_list),
  385. minor->debugfs_root, minor);
  386. if (ret) {
  387. dev_err(dev->dev, "could not install msm_debugfs_list\n");
  388. return ret;
  389. }
  390. return ret;
  391. }
  392. static void msm_debugfs_cleanup(struct drm_minor *minor)
  393. {
  394. drm_debugfs_remove_files(msm_debugfs_list,
  395. ARRAY_SIZE(msm_debugfs_list), minor);
  396. }
#endif

/*
 * Fences:
 */
/*
 * Wait (interruptibly) for the GPU to retire 'fence'.
 *
 * Returns 0 on completion (or trivially when no GPU is loaded),
 * -EINVAL for a fence that was never submitted, -EBUSY for a no-wait
 * poll that isn't done yet, -ETIMEDOUT on timeout, or -ERESTARTSYS if
 * interrupted by a signal.  timeout == NULL means "don't block".
 */
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
		struct timespec *timeout)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	if (!priv->gpu)
		return 0;

	if (fence > priv->gpu->submitted_fence) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, priv->gpu->submitted_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* no-wait: */
		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
	} else {
		unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
		unsigned long start_jiffies = jiffies;
		unsigned long remaining_jiffies;

		/* NOTE(review): timespec_to_jiffies() converts a duration,
		 * yet the result is compared against the current 'jiffies'
		 * as though it were an absolute deadline.  This only works
		 * if callers pass an absolute timestamp -- confirm against
		 * the wait-fence UAPI before changing.
		 */
		if (time_after(start_jiffies, timeout_jiffies))
			remaining_jiffies = 0;
		else
			remaining_jiffies = timeout_jiffies - start_jiffies;

		ret = wait_event_interruptible_timeout(priv->fence_event,
				fence_completed(dev, fence),
				remaining_jiffies);

		if (ret == 0) {
			/* 0 from the wait helper means the timeout elapsed */
			DBG("timeout waiting for fence: %u (completed: %u)",
					fence, priv->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			/* positive remaining time => fence completed */
			ret = 0;
		}
	}

	return ret;
}
  437. /* call under struct_mutex */
  438. void msm_update_fence(struct drm_device *dev, uint32_t fence)
  439. {
  440. struct msm_drm_private *priv = dev->dev_private;
  441. if (fence > priv->completed_fence) {
  442. priv->completed_fence = fence;
  443. wake_up_all(&priv->fence_event);
  444. }
  445. }
/*
 * DRM ioctls:
 */
  449. static int msm_ioctl_get_param(struct drm_device *dev, void *data,
  450. struct drm_file *file)
  451. {
  452. struct msm_drm_private *priv = dev->dev_private;
  453. struct drm_msm_param *args = data;
  454. struct msm_gpu *gpu;
  455. /* for now, we just have 3d pipe.. eventually this would need to
  456. * be more clever to dispatch to appropriate gpu module:
  457. */
  458. if (args->pipe != MSM_PIPE_3D0)
  459. return -EINVAL;
  460. gpu = priv->gpu;
  461. if (!gpu)
  462. return -ENXIO;
  463. return gpu->funcs->get_param(gpu, args->param, &args->value);
  464. }
  465. static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
  466. struct drm_file *file)
  467. {
  468. struct drm_msm_gem_new *args = data;
  469. return msm_gem_new_handle(dev, file, args->size,
  470. args->flags, &args->handle);
  471. }
  472. #define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
  473. static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
  474. struct drm_file *file)
  475. {
  476. struct drm_msm_gem_cpu_prep *args = data;
  477. struct drm_gem_object *obj;
  478. int ret;
  479. obj = drm_gem_object_lookup(dev, file, args->handle);
  480. if (!obj)
  481. return -ENOENT;
  482. ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));
  483. drm_gem_object_unreference_unlocked(obj);
  484. return ret;
  485. }
  486. static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
  487. struct drm_file *file)
  488. {
  489. struct drm_msm_gem_cpu_fini *args = data;
  490. struct drm_gem_object *obj;
  491. int ret;
  492. obj = drm_gem_object_lookup(dev, file, args->handle);
  493. if (!obj)
  494. return -ENOENT;
  495. ret = msm_gem_cpu_fini(obj);
  496. drm_gem_object_unreference_unlocked(obj);
  497. return ret;
  498. }
  499. static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
  500. struct drm_file *file)
  501. {
  502. struct drm_msm_gem_info *args = data;
  503. struct drm_gem_object *obj;
  504. int ret = 0;
  505. if (args->pad)
  506. return -EINVAL;
  507. obj = drm_gem_object_lookup(dev, file, args->handle);
  508. if (!obj)
  509. return -ENOENT;
  510. args->offset = msm_gem_mmap_offset(obj);
  511. drm_gem_object_unreference_unlocked(obj);
  512. return ret;
  513. }
  514. static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
  515. struct drm_file *file)
  516. {
  517. struct drm_msm_wait_fence *args = data;
  518. return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
  519. }
/* Driver-private ioctl table; all entries are unlocked and auth-required. */
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_UNLOCKED|DRM_AUTH),
};
/* vma ops for mmap'd GEM objects: driver fault handler, core open/close. */
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
/* Character-device file ops: standard DRM handlers plus GEM mmap. */
static const struct file_operations fops = {
	.owner              = THIS_MODULE,
	.open               = drm_open,
	.release            = drm_release,
	.unlocked_ioctl     = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl       = drm_compat_ioctl,
#endif
	.poll               = drm_poll,
	.read               = drm_read,
	.llseek             = no_llseek,
	.mmap               = msm_gem_mmap,
};
/* Main DRM driver descriptor tying together all the hooks above. */
static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
	.load               = msm_load,
	.unload             = msm_unload,
	.open               = msm_open,
	.preclose           = msm_preclose,
	.lastclose          = msm_lastclose,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object    = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.dumb_destroy       = drm_gem_dumb_destroy,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
	.debugfs_cleanup    = msm_debugfs_cleanup,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = DRM_MSM_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = 1,
	.minor              = 0,
};
#ifdef CONFIG_PM_SLEEP
  580. static int msm_pm_suspend(struct device *dev)
  581. {
  582. struct drm_device *ddev = dev_get_drvdata(dev);
  583. drm_kms_helper_poll_disable(ddev);
  584. return 0;
  585. }
  586. static int msm_pm_resume(struct device *dev)
  587. {
  588. struct drm_device *ddev = dev_get_drvdata(dev);
  589. drm_kms_helper_poll_enable(ddev);
  590. return 0;
  591. }
#endif
/* System sleep PM ops (no-ops unless CONFIG_PM_SLEEP is enabled). */
static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};
/*
 * Platform driver:
 */
  599. static int msm_pdev_probe(struct platform_device *pdev)
  600. {
  601. return drm_platform_init(&msm_driver, pdev);
  602. }
  603. static int msm_pdev_remove(struct platform_device *pdev)
  604. {
  605. drm_platform_exit(&msm_driver, pdev);
  606. return 0;
  607. }
/* Platform device match table; binds against the "mdp" device. */
static const struct platform_device_id msm_id[] = {
	{ "mdp", 0 },
	{ }
};
/* Platform driver glue: probe/remove plus PM ops and the match table. */
static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.owner  = THIS_MODULE,
		.name   = "msm",
		.pm     = &msm_pm_ops,
	},
	.id_table   = msm_id,
};
  622. static int __init msm_drm_register(void)
  623. {
  624. DBG("init");
  625. hdmi_register();
  626. a3xx_register();
  627. return platform_driver_register(&msm_platform_driver);
  628. }
  629. static void __exit msm_drm_unregister(void)
  630. {
  631. DBG("fini");
  632. platform_driver_unregister(&msm_platform_driver);
  633. hdmi_unregister();
  634. a3xx_unregister();
  635. }
  636. module_init(msm_drm_register);
  637. module_exit(msm_drm_unregister);
  638. MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
  639. MODULE_DESCRIPTION("MSM DRM Driver");
  640. MODULE_LICENSE("GPL");