/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "msm_drv.h"
#include "msm_gpu.h"
  19. static void msm_fb_output_poll_changed(struct drm_device *dev)
  20. {
  21. struct msm_drm_private *priv = dev->dev_private;
  22. if (priv->fbdev)
  23. drm_fb_helper_hotplug_event(priv->fbdev);
  24. }
  25. static const struct drm_mode_config_funcs mode_config_funcs = {
  26. .fb_create = msm_framebuffer_create,
  27. .output_poll_changed = msm_fb_output_poll_changed,
  28. };
  29. static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
  30. unsigned long iova, int flags, void *arg)
  31. {
  32. DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
  33. return 0;
  34. }
  35. int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
  36. {
  37. struct msm_drm_private *priv = dev->dev_private;
  38. int idx = priv->num_iommus++;
  39. if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
  40. return -EINVAL;
  41. priv->iommus[idx] = iommu;
  42. iommu_set_fault_handler(iommu, msm_fault_handler, dev);
  43. /* need to iommu_attach_device() somewhere?? on resume?? */
  44. return idx;
  45. }
  46. int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
  47. const char **names, int cnt)
  48. {
  49. int i, ret;
  50. for (i = 0; i < cnt; i++) {
  51. /* TODO maybe some day msm iommu won't require this hack: */
  52. struct device *msm_iommu_get_ctx(const char *ctx_name);
  53. struct device *ctx = msm_iommu_get_ctx(names[i]);
  54. if (!ctx)
  55. continue;
  56. ret = iommu_attach_device(iommu, ctx);
  57. if (ret) {
  58. dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
  59. return ret;
  60. }
  61. }
  62. return 0;
  63. }
  64. #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
  65. static bool reglog = false;
  66. MODULE_PARM_DESC(reglog, "Enable register read/write logging");
  67. module_param(reglog, bool, 0600);
  68. #else
  69. #define reglog 0
  70. #endif
  71. void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
  72. const char *dbgname)
  73. {
  74. struct resource *res;
  75. unsigned long size;
  76. void __iomem *ptr;
  77. if (name)
  78. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
  79. else
  80. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  81. if (!res) {
  82. dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
  83. return ERR_PTR(-EINVAL);
  84. }
  85. size = resource_size(res);
  86. ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
  87. if (!ptr) {
  88. dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
  89. return ERR_PTR(-ENOMEM);
  90. }
  91. if (reglog)
  92. printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
  93. return ptr;
  94. }
  95. void msm_writel(u32 data, void __iomem *addr)
  96. {
  97. if (reglog)
  98. printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
  99. writel(data, addr);
  100. }
  101. u32 msm_readl(const void __iomem *addr)
  102. {
  103. u32 val = readl(addr);
  104. if (reglog)
  105. printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
  106. return val;
  107. }
/*
 * DRM operations:
 */
  111. static int msm_unload(struct drm_device *dev)
  112. {
  113. struct msm_drm_private *priv = dev->dev_private;
  114. struct msm_kms *kms = priv->kms;
  115. struct msm_gpu *gpu = priv->gpu;
  116. drm_kms_helper_poll_fini(dev);
  117. drm_mode_config_cleanup(dev);
  118. drm_vblank_cleanup(dev);
  119. pm_runtime_get_sync(dev->dev);
  120. drm_irq_uninstall(dev);
  121. pm_runtime_put_sync(dev->dev);
  122. flush_workqueue(priv->wq);
  123. destroy_workqueue(priv->wq);
  124. if (kms) {
  125. pm_runtime_disable(dev->dev);
  126. kms->funcs->destroy(kms);
  127. }
  128. if (gpu) {
  129. mutex_lock(&dev->struct_mutex);
  130. gpu->funcs->pm_suspend(gpu);
  131. gpu->funcs->destroy(gpu);
  132. mutex_unlock(&dev->struct_mutex);
  133. }
  134. dev->dev_private = NULL;
  135. kfree(priv);
  136. return 0;
  137. }
  138. static int msm_load(struct drm_device *dev, unsigned long flags)
  139. {
  140. struct platform_device *pdev = dev->platformdev;
  141. struct msm_drm_private *priv;
  142. struct msm_kms *kms;
  143. int ret;
  144. priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  145. if (!priv) {
  146. dev_err(dev->dev, "failed to allocate private data\n");
  147. return -ENOMEM;
  148. }
  149. dev->dev_private = priv;
  150. priv->wq = alloc_ordered_workqueue("msm", 0);
  151. init_waitqueue_head(&priv->fence_event);
  152. INIT_LIST_HEAD(&priv->inactive_list);
  153. INIT_LIST_HEAD(&priv->fence_cbs);
  154. drm_mode_config_init(dev);
  155. kms = mdp4_kms_init(dev);
  156. if (IS_ERR(kms)) {
  157. /*
  158. * NOTE: once we have GPU support, having no kms should not
  159. * be considered fatal.. ideally we would still support gpu
  160. * and (for example) use dmabuf/prime to share buffers with
  161. * imx drm driver on iMX5
  162. */
  163. dev_err(dev->dev, "failed to load kms\n");
  164. ret = PTR_ERR(kms);
  165. goto fail;
  166. }
  167. priv->kms = kms;
  168. if (kms) {
  169. pm_runtime_enable(dev->dev);
  170. ret = kms->funcs->hw_init(kms);
  171. if (ret) {
  172. dev_err(dev->dev, "kms hw init failed: %d\n", ret);
  173. goto fail;
  174. }
  175. }
  176. dev->mode_config.min_width = 0;
  177. dev->mode_config.min_height = 0;
  178. dev->mode_config.max_width = 2048;
  179. dev->mode_config.max_height = 2048;
  180. dev->mode_config.funcs = &mode_config_funcs;
  181. ret = drm_vblank_init(dev, 1);
  182. if (ret < 0) {
  183. dev_err(dev->dev, "failed to initialize vblank\n");
  184. goto fail;
  185. }
  186. pm_runtime_get_sync(dev->dev);
  187. ret = drm_irq_install(dev);
  188. pm_runtime_put_sync(dev->dev);
  189. if (ret < 0) {
  190. dev_err(dev->dev, "failed to install IRQ handler\n");
  191. goto fail;
  192. }
  193. platform_set_drvdata(pdev, dev);
  194. #ifdef CONFIG_DRM_MSM_FBDEV
  195. priv->fbdev = msm_fbdev_init(dev);
  196. #endif
  197. drm_kms_helper_poll_init(dev);
  198. return 0;
  199. fail:
  200. msm_unload(dev);
  201. return ret;
  202. }
  203. static void load_gpu(struct drm_device *dev)
  204. {
  205. struct msm_drm_private *priv = dev->dev_private;
  206. struct msm_gpu *gpu;
  207. if (priv->gpu)
  208. return;
  209. mutex_lock(&dev->struct_mutex);
  210. gpu = a3xx_gpu_init(dev);
  211. if (IS_ERR(gpu)) {
  212. dev_warn(dev->dev, "failed to load a3xx gpu\n");
  213. gpu = NULL;
  214. /* not fatal */
  215. }
  216. mutex_unlock(&dev->struct_mutex);
  217. if (gpu) {
  218. int ret;
  219. gpu->funcs->pm_resume(gpu);
  220. ret = gpu->funcs->hw_init(gpu);
  221. if (ret) {
  222. dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
  223. gpu->funcs->destroy(gpu);
  224. gpu = NULL;
  225. }
  226. }
  227. priv->gpu = gpu;
  228. }
  229. static int msm_open(struct drm_device *dev, struct drm_file *file)
  230. {
  231. struct msm_file_private *ctx;
  232. /* For now, load gpu on open.. to avoid the requirement of having
  233. * firmware in the initrd.
  234. */
  235. load_gpu(dev);
  236. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  237. if (!ctx)
  238. return -ENOMEM;
  239. file->driver_priv = ctx;
  240. return 0;
  241. }
  242. static void msm_preclose(struct drm_device *dev, struct drm_file *file)
  243. {
  244. struct msm_drm_private *priv = dev->dev_private;
  245. struct msm_file_private *ctx = file->driver_priv;
  246. struct msm_kms *kms = priv->kms;
  247. if (kms)
  248. kms->funcs->preclose(kms, file);
  249. mutex_lock(&dev->struct_mutex);
  250. if (ctx == priv->lastctx)
  251. priv->lastctx = NULL;
  252. mutex_unlock(&dev->struct_mutex);
  253. kfree(ctx);
  254. }
  255. static void msm_lastclose(struct drm_device *dev)
  256. {
  257. struct msm_drm_private *priv = dev->dev_private;
  258. if (priv->fbdev) {
  259. drm_modeset_lock_all(dev);
  260. drm_fb_helper_restore_fbdev_mode(priv->fbdev);
  261. drm_modeset_unlock_all(dev);
  262. }
  263. }
  264. static irqreturn_t msm_irq(DRM_IRQ_ARGS)
  265. {
  266. struct drm_device *dev = arg;
  267. struct msm_drm_private *priv = dev->dev_private;
  268. struct msm_kms *kms = priv->kms;
  269. BUG_ON(!kms);
  270. return kms->funcs->irq(kms);
  271. }
  272. static void msm_irq_preinstall(struct drm_device *dev)
  273. {
  274. struct msm_drm_private *priv = dev->dev_private;
  275. struct msm_kms *kms = priv->kms;
  276. BUG_ON(!kms);
  277. kms->funcs->irq_preinstall(kms);
  278. }
  279. static int msm_irq_postinstall(struct drm_device *dev)
  280. {
  281. struct msm_drm_private *priv = dev->dev_private;
  282. struct msm_kms *kms = priv->kms;
  283. BUG_ON(!kms);
  284. return kms->funcs->irq_postinstall(kms);
  285. }
  286. static void msm_irq_uninstall(struct drm_device *dev)
  287. {
  288. struct msm_drm_private *priv = dev->dev_private;
  289. struct msm_kms *kms = priv->kms;
  290. BUG_ON(!kms);
  291. kms->funcs->irq_uninstall(kms);
  292. }
  293. static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
  294. {
  295. struct msm_drm_private *priv = dev->dev_private;
  296. struct msm_kms *kms = priv->kms;
  297. if (!kms)
  298. return -ENXIO;
  299. DBG("dev=%p, crtc=%d", dev, crtc_id);
  300. return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
  301. }
  302. static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
  303. {
  304. struct msm_drm_private *priv = dev->dev_private;
  305. struct msm_kms *kms = priv->kms;
  306. if (!kms)
  307. return;
  308. DBG("dev=%p, crtc=%d", dev, crtc_id);
  309. kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
  310. }
/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
  315. static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
  316. {
  317. struct msm_drm_private *priv = dev->dev_private;
  318. struct msm_gpu *gpu = priv->gpu;
  319. if (gpu) {
  320. seq_printf(m, "%s Status:\n", gpu->name);
  321. gpu->funcs->show(gpu, m);
  322. }
  323. return 0;
  324. }
  325. static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
  326. {
  327. struct msm_drm_private *priv = dev->dev_private;
  328. struct msm_gpu *gpu = priv->gpu;
  329. if (gpu) {
  330. seq_printf(m, "Active Objects (%s):\n", gpu->name);
  331. msm_gem_describe_objects(&gpu->active_list, m);
  332. }
  333. seq_printf(m, "Inactive Objects:\n");
  334. msm_gem_describe_objects(&priv->inactive_list, m);
  335. return 0;
  336. }
  337. static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
  338. {
  339. return drm_mm_dump_table(m, dev->mm_private);
  340. }
  341. static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
  342. {
  343. struct msm_drm_private *priv = dev->dev_private;
  344. struct drm_framebuffer *fb, *fbdev_fb = NULL;
  345. if (priv->fbdev) {
  346. seq_printf(m, "fbcon ");
  347. fbdev_fb = priv->fbdev->fb;
  348. msm_framebuffer_describe(fbdev_fb, m);
  349. }
  350. mutex_lock(&dev->mode_config.fb_lock);
  351. list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
  352. if (fb == fbdev_fb)
  353. continue;
  354. seq_printf(m, "user ");
  355. msm_framebuffer_describe(fb, m);
  356. }
  357. mutex_unlock(&dev->mode_config.fb_lock);
  358. return 0;
  359. }
  360. static int show_locked(struct seq_file *m, void *arg)
  361. {
  362. struct drm_info_node *node = (struct drm_info_node *) m->private;
  363. struct drm_device *dev = node->minor->dev;
  364. int (*show)(struct drm_device *dev, struct seq_file *m) =
  365. node->info_ent->data;
  366. int ret;
  367. ret = mutex_lock_interruptible(&dev->struct_mutex);
  368. if (ret)
  369. return ret;
  370. ret = show(dev, m);
  371. mutex_unlock(&dev->struct_mutex);
  372. return ret;
  373. }
  374. static struct drm_info_list msm_debugfs_list[] = {
  375. {"gpu", show_locked, 0, msm_gpu_show},
  376. {"gem", show_locked, 0, msm_gem_show},
  377. { "mm", show_locked, 0, msm_mm_show },
  378. { "fb", show_locked, 0, msm_fb_show },
  379. };
  380. static int msm_debugfs_init(struct drm_minor *minor)
  381. {
  382. struct drm_device *dev = minor->dev;
  383. int ret;
  384. ret = drm_debugfs_create_files(msm_debugfs_list,
  385. ARRAY_SIZE(msm_debugfs_list),
  386. minor->debugfs_root, minor);
  387. if (ret) {
  388. dev_err(dev->dev, "could not install msm_debugfs_list\n");
  389. return ret;
  390. }
  391. return ret;
  392. }
  393. static void msm_debugfs_cleanup(struct drm_minor *minor)
  394. {
  395. drm_debugfs_remove_files(msm_debugfs_list,
  396. ARRAY_SIZE(msm_debugfs_list), minor);
  397. }
#endif

/*
 * Fences:
 */
  402. int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
  403. struct timespec *timeout)
  404. {
  405. struct msm_drm_private *priv = dev->dev_private;
  406. int ret;
  407. if (!priv->gpu)
  408. return 0;
  409. if (fence > priv->gpu->submitted_fence) {
  410. DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
  411. fence, priv->gpu->submitted_fence);
  412. return -EINVAL;
  413. }
  414. if (!timeout) {
  415. /* no-wait: */
  416. ret = fence_completed(dev, fence) ? 0 : -EBUSY;
  417. } else {
  418. unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
  419. unsigned long start_jiffies = jiffies;
  420. unsigned long remaining_jiffies;
  421. if (time_after(start_jiffies, timeout_jiffies))
  422. remaining_jiffies = 0;
  423. else
  424. remaining_jiffies = timeout_jiffies - start_jiffies;
  425. ret = wait_event_interruptible_timeout(priv->fence_event,
  426. fence_completed(dev, fence),
  427. remaining_jiffies);
  428. if (ret == 0) {
  429. DBG("timeout waiting for fence: %u (completed: %u)",
  430. fence, priv->completed_fence);
  431. ret = -ETIMEDOUT;
  432. } else if (ret != -ERESTARTSYS) {
  433. ret = 0;
  434. }
  435. }
  436. return ret;
  437. }
  438. /* called from workqueue */
  439. void msm_update_fence(struct drm_device *dev, uint32_t fence)
  440. {
  441. struct msm_drm_private *priv = dev->dev_private;
  442. mutex_lock(&dev->struct_mutex);
  443. priv->completed_fence = max(fence, priv->completed_fence);
  444. while (!list_empty(&priv->fence_cbs)) {
  445. struct msm_fence_cb *cb;
  446. cb = list_first_entry(&priv->fence_cbs,
  447. struct msm_fence_cb, work.entry);
  448. if (cb->fence > priv->completed_fence)
  449. break;
  450. list_del_init(&cb->work.entry);
  451. queue_work(priv->wq, &cb->work);
  452. }
  453. mutex_unlock(&dev->struct_mutex);
  454. wake_up_all(&priv->fence_event);
  455. }
  456. void __msm_fence_worker(struct work_struct *work)
  457. {
  458. struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
  459. cb->func(cb);
  460. }
/*
 * DRM ioctls:
 */
  464. static int msm_ioctl_get_param(struct drm_device *dev, void *data,
  465. struct drm_file *file)
  466. {
  467. struct msm_drm_private *priv = dev->dev_private;
  468. struct drm_msm_param *args = data;
  469. struct msm_gpu *gpu;
  470. /* for now, we just have 3d pipe.. eventually this would need to
  471. * be more clever to dispatch to appropriate gpu module:
  472. */
  473. if (args->pipe != MSM_PIPE_3D0)
  474. return -EINVAL;
  475. gpu = priv->gpu;
  476. if (!gpu)
  477. return -ENXIO;
  478. return gpu->funcs->get_param(gpu, args->param, &args->value);
  479. }
  480. static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
  481. struct drm_file *file)
  482. {
  483. struct drm_msm_gem_new *args = data;
  484. return msm_gem_new_handle(dev, file, args->size,
  485. args->flags, &args->handle);
  486. }
  487. #define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
  488. static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
  489. struct drm_file *file)
  490. {
  491. struct drm_msm_gem_cpu_prep *args = data;
  492. struct drm_gem_object *obj;
  493. int ret;
  494. obj = drm_gem_object_lookup(dev, file, args->handle);
  495. if (!obj)
  496. return -ENOENT;
  497. ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));
  498. drm_gem_object_unreference_unlocked(obj);
  499. return ret;
  500. }
  501. static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
  502. struct drm_file *file)
  503. {
  504. struct drm_msm_gem_cpu_fini *args = data;
  505. struct drm_gem_object *obj;
  506. int ret;
  507. obj = drm_gem_object_lookup(dev, file, args->handle);
  508. if (!obj)
  509. return -ENOENT;
  510. ret = msm_gem_cpu_fini(obj);
  511. drm_gem_object_unreference_unlocked(obj);
  512. return ret;
  513. }
  514. static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
  515. struct drm_file *file)
  516. {
  517. struct drm_msm_gem_info *args = data;
  518. struct drm_gem_object *obj;
  519. int ret = 0;
  520. if (args->pad)
  521. return -EINVAL;
  522. obj = drm_gem_object_lookup(dev, file, args->handle);
  523. if (!obj)
  524. return -ENOENT;
  525. args->offset = msm_gem_mmap_offset(obj);
  526. drm_gem_object_unreference_unlocked(obj);
  527. return ret;
  528. }
  529. static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
  530. struct drm_file *file)
  531. {
  532. struct drm_msm_wait_fence *args = data;
  533. return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
  534. }
  535. static const struct drm_ioctl_desc msm_ioctls[] = {
  536. DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
  537. DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
  538. DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
  539. DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
  540. DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
  541. DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
  542. DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
  543. };
  544. static const struct vm_operations_struct vm_ops = {
  545. .fault = msm_gem_fault,
  546. .open = drm_gem_vm_open,
  547. .close = drm_gem_vm_close,
  548. };
  549. static const struct file_operations fops = {
  550. .owner = THIS_MODULE,
  551. .open = drm_open,
  552. .release = drm_release,
  553. .unlocked_ioctl = drm_ioctl,
  554. #ifdef CONFIG_COMPAT
  555. .compat_ioctl = drm_compat_ioctl,
  556. #endif
  557. .poll = drm_poll,
  558. .read = drm_read,
  559. .llseek = no_llseek,
  560. .mmap = msm_gem_mmap,
  561. };
  562. static struct drm_driver msm_driver = {
  563. .driver_features = DRIVER_HAVE_IRQ |
  564. DRIVER_GEM |
  565. DRIVER_PRIME |
  566. DRIVER_RENDER |
  567. DRIVER_MODESET,
  568. .load = msm_load,
  569. .unload = msm_unload,
  570. .open = msm_open,
  571. .preclose = msm_preclose,
  572. .lastclose = msm_lastclose,
  573. .irq_handler = msm_irq,
  574. .irq_preinstall = msm_irq_preinstall,
  575. .irq_postinstall = msm_irq_postinstall,
  576. .irq_uninstall = msm_irq_uninstall,
  577. .get_vblank_counter = drm_vblank_count,
  578. .enable_vblank = msm_enable_vblank,
  579. .disable_vblank = msm_disable_vblank,
  580. .gem_free_object = msm_gem_free_object,
  581. .gem_vm_ops = &vm_ops,
  582. .dumb_create = msm_gem_dumb_create,
  583. .dumb_map_offset = msm_gem_dumb_map_offset,
  584. .dumb_destroy = drm_gem_dumb_destroy,
  585. .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
  586. .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
  587. .gem_prime_export = drm_gem_prime_export,
  588. .gem_prime_import = drm_gem_prime_import,
  589. .gem_prime_pin = msm_gem_prime_pin,
  590. .gem_prime_unpin = msm_gem_prime_unpin,
  591. .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
  592. .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
  593. .gem_prime_vmap = msm_gem_prime_vmap,
  594. .gem_prime_vunmap = msm_gem_prime_vunmap,
  595. #ifdef CONFIG_DEBUG_FS
  596. .debugfs_init = msm_debugfs_init,
  597. .debugfs_cleanup = msm_debugfs_cleanup,
  598. #endif
  599. .ioctls = msm_ioctls,
  600. .num_ioctls = DRM_MSM_NUM_IOCTLS,
  601. .fops = &fops,
  602. .name = "msm",
  603. .desc = "MSM Snapdragon DRM",
  604. .date = "20130625",
  605. .major = 1,
  606. .minor = 0,
  607. };
#ifdef CONFIG_PM_SLEEP
  609. static int msm_pm_suspend(struct device *dev)
  610. {
  611. struct drm_device *ddev = dev_get_drvdata(dev);
  612. drm_kms_helper_poll_disable(ddev);
  613. return 0;
  614. }
  615. static int msm_pm_resume(struct device *dev)
  616. {
  617. struct drm_device *ddev = dev_get_drvdata(dev);
  618. drm_kms_helper_poll_enable(ddev);
  619. return 0;
  620. }
#endif
  622. static const struct dev_pm_ops msm_pm_ops = {
  623. SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
  624. };
/*
 * Platform driver:
 */
  628. static int msm_pdev_probe(struct platform_device *pdev)
  629. {
  630. return drm_platform_init(&msm_driver, pdev);
  631. }
  632. static int msm_pdev_remove(struct platform_device *pdev)
  633. {
  634. drm_platform_exit(&msm_driver, pdev);
  635. return 0;
  636. }
  637. static const struct platform_device_id msm_id[] = {
  638. { "mdp", 0 },
  639. { }
  640. };
  641. static struct platform_driver msm_platform_driver = {
  642. .probe = msm_pdev_probe,
  643. .remove = msm_pdev_remove,
  644. .driver = {
  645. .owner = THIS_MODULE,
  646. .name = "msm",
  647. .pm = &msm_pm_ops,
  648. },
  649. .id_table = msm_id,
  650. };
  651. static int __init msm_drm_register(void)
  652. {
  653. DBG("init");
  654. hdmi_register();
  655. a3xx_register();
  656. return platform_driver_register(&msm_platform_driver);
  657. }
  658. static void __exit msm_drm_unregister(void)
  659. {
  660. DBG("fini");
  661. platform_driver_unregister(&msm_platform_driver);
  662. hdmi_unregister();
  663. a3xx_unregister();
  664. }
  665. module_init(msm_drm_register);
  666. module_exit(msm_drm_unregister);
  667. MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
  668. MODULE_DESCRIPTION("MSM DRM Driver");
  669. MODULE_LICENSE("GPL");