drm_gem.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721
  1. /*
  2. * Copyright © 2008 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eric Anholt <eric@anholt.net>
  25. *
  26. */
  27. #include <linux/types.h>
  28. #include <linux/slab.h>
  29. #include <linux/mm.h>
  30. #include <linux/uaccess.h>
  31. #include <linux/fs.h>
  32. #include <linux/file.h>
  33. #include <linux/module.h>
  34. #include <linux/mman.h>
  35. #include <linux/pagemap.h>
  36. #include <linux/shmem_fs.h>
  37. #include <linux/dma-buf.h>
  38. #include <drm/drmP.h>
  39. /** @file drm_gem.c
  40. *
  41. * This file provides some of the base ioctls and library routines for
  42. * the graphics memory manager implemented by each device driver.
  43. *
  44. * Because various devices have different requirements in terms of
  45. * synchronization and migration strategies, implementing that is left up to
  46. * the driver, and all that the general API provides should be generic --
  47. * allocating objects, reading/writing data with the cpu, freeing objects.
  48. * Even there, platform-dependent optimizations for reading/writing data with
  49. * the CPU mean we'll likely hook those out to driver-specific calls. However,
  50. * the DRI2 implementation wants to have at least allocate/mmap be generic.
  51. *
  52. * The goal was to have swap-backed object allocation managed through
  53. * struct file. However, file descriptors as handles to a struct file have
  54. * two major failings:
  55. * - Process limits prevent more than 1024 or so being used at a time by
  56. * default.
  57. * - Inability to allocate high fds will aggravate the X Server's select()
  58. * handling, and likely that of many GL client applications as well.
  59. *
  60. * This led to a plan of using our own integer IDs (called handles, following
  61. * DRM terminology) to mimic fds, and implement the fd syscalls we need as
  62. * ioctls. The objects themselves will still include the struct file so
  63. * that we can transition to fds if the required kernel infrastructure shows
  64. * up at a later date, and as our interface with shmfs for memory allocation.
  65. */
  66. /*
  67. * We make up offsets for buffer objects so we can recognize them at
  68. * mmap time.
  69. */
  70. /* pgoff in mmap is an unsigned long, so we need to make sure that
  71. * the faked up offset will fit
  72. */
  73. #if BITS_PER_LONG == 64
  74. #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
  75. #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
  76. #else
  77. #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
  78. #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
  79. #endif
  80. /**
  81. * Initialize the GEM device fields
  82. */
  83. int
  84. drm_gem_init(struct drm_device *dev)
  85. {
  86. struct drm_gem_mm *mm;
  87. spin_lock_init(&dev->object_name_lock);
  88. idr_init(&dev->object_name_idr);
  89. mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
  90. if (!mm) {
  91. DRM_ERROR("out of memory\n");
  92. return -ENOMEM;
  93. }
  94. dev->mm_private = mm;
  95. if (drm_ht_create(&mm->offset_hash, 12)) {
  96. kfree(mm);
  97. return -ENOMEM;
  98. }
  99. if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
  100. DRM_FILE_PAGE_OFFSET_SIZE)) {
  101. drm_ht_remove(&mm->offset_hash);
  102. kfree(mm);
  103. return -ENOMEM;
  104. }
  105. return 0;
  106. }
  107. void
  108. drm_gem_destroy(struct drm_device *dev)
  109. {
  110. struct drm_gem_mm *mm = dev->mm_private;
  111. drm_mm_takedown(&mm->offset_manager);
  112. drm_ht_remove(&mm->offset_hash);
  113. kfree(mm);
  114. dev->mm_private = NULL;
  115. }
  116. /**
  117. * Initialize an already allocated GEM object of the specified size with
  118. * shmfs backing store.
  119. */
  120. int drm_gem_object_init(struct drm_device *dev,
  121. struct drm_gem_object *obj, size_t size)
  122. {
  123. BUG_ON((size & (PAGE_SIZE - 1)) != 0);
  124. obj->dev = dev;
  125. obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
  126. if (IS_ERR(obj->filp))
  127. return PTR_ERR(obj->filp);
  128. kref_init(&obj->refcount);
  129. atomic_set(&obj->handle_count, 0);
  130. obj->size = size;
  131. return 0;
  132. }
  133. EXPORT_SYMBOL(drm_gem_object_init);
  134. /**
  135. * Initialize an already allocated GEM object of the specified size with
  136. * no GEM provided backing store. Instead the caller is responsible for
  137. * backing the object and handling it.
  138. */
  139. int drm_gem_private_object_init(struct drm_device *dev,
  140. struct drm_gem_object *obj, size_t size)
  141. {
  142. BUG_ON((size & (PAGE_SIZE - 1)) != 0);
  143. obj->dev = dev;
  144. obj->filp = NULL;
  145. kref_init(&obj->refcount);
  146. atomic_set(&obj->handle_count, 0);
  147. obj->size = size;
  148. return 0;
  149. }
  150. EXPORT_SYMBOL(drm_gem_private_object_init);
  151. /**
  152. * Allocate a GEM object of the specified size with shmfs backing store
  153. */
  154. struct drm_gem_object *
  155. drm_gem_object_alloc(struct drm_device *dev, size_t size)
  156. {
  157. struct drm_gem_object *obj;
  158. obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  159. if (!obj)
  160. goto free;
  161. if (drm_gem_object_init(dev, obj, size) != 0)
  162. goto free;
  163. if (dev->driver->gem_init_object != NULL &&
  164. dev->driver->gem_init_object(obj) != 0) {
  165. goto fput;
  166. }
  167. return obj;
  168. fput:
  169. /* Object_init mangles the global counters - readjust them. */
  170. fput(obj->filp);
  171. free:
  172. kfree(obj);
  173. return NULL;
  174. }
  175. EXPORT_SYMBOL(drm_gem_object_alloc);
  176. static void
  177. drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
  178. {
  179. if (obj->import_attach) {
  180. drm_prime_remove_imported_buf_handle(&filp->prime,
  181. obj->import_attach->dmabuf);
  182. }
  183. if (obj->export_dma_buf) {
  184. drm_prime_remove_imported_buf_handle(&filp->prime,
  185. obj->export_dma_buf);
  186. }
  187. }
/**
 * drm_gem_handle_delete - remove the @handle -> object mapping for @filp
 * @filp: DRM file whose handle table is modified
 * @handle: userspace handle to delete
 *
 * Drops the handle reference (possibly freeing the object) after calling
 * the driver's gem_close_object() hook and removing any PRIME entries.
 * Returns 0 on success, -EINVAL if @handle is not in the table.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	/* Cache dev before the handle (and possibly the object) goes away. */
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	/* Teardown that may sleep happens outside the spinlock. */
	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
 * drm_gem_handle_create - create a userspace handle for @obj
 * @file_priv: DRM file to own the handle
 * @obj: object to create a handle for
 * @handlep: out parameter receiving the new handle
 *
 * This adds a handle reference to the object, which includes a regular
 * reference count. Callers will likely want to dereference the object
 * afterwards. Returns 0 on success or a negative errno.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	/* Handles start at 1; 0 means "no handle" to userspace. */
	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;
	*handlep = ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			/* Unwind: also drops the handle ref taken above. */
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
  258. /**
  259. * drm_gem_free_mmap_offset - release a fake mmap offset for an object
  260. * @obj: obj in question
  261. *
  262. * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
  263. */
  264. void
  265. drm_gem_free_mmap_offset(struct drm_gem_object *obj)
  266. {
  267. struct drm_device *dev = obj->dev;
  268. struct drm_gem_mm *mm = dev->mm_private;
  269. struct drm_map_list *list = &obj->map_list;
  270. drm_ht_remove_item(&mm->offset_hash, &list->hash);
  271. drm_mm_put_block(list->file_offset_node);
  272. kfree(list->map);
  273. list->map = NULL;
  274. }
  275. EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOSPC when the
 * offset address space is exhausted, or a drm_ht_insert_item() error.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	/* NOTE(review): allocates sizeof(struct drm_map_list) for what is
	 * used as a drm_local_map — presumably historical; confirm before
	 * shrinking. */
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, false);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	/* search_free only finds a hole; get_block actually claims it. */
	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	/* The allocation's start page is the fake-offset hash key that
	 * drm_gem_mmap() will look up via vma->vm_pgoff. */
	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
  333. /** Returns a reference to the object named by the handle. */
  334. struct drm_gem_object *
  335. drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
  336. u32 handle)
  337. {
  338. struct drm_gem_object *obj;
  339. spin_lock(&filp->table_lock);
  340. /* Check if we currently have a reference on the object */
  341. obj = idr_find(&filp->object_idr, handle);
  342. if (obj == NULL) {
  343. spin_unlock(&filp->table_lock);
  344. return NULL;
  345. }
  346. drm_gem_object_reference(obj);
  347. spin_unlock(&filp->table_lock);
  348. return obj;
  349. }
  350. EXPORT_SYMBOL(drm_gem_object_lookup);
  351. /**
  352. * Releases the handle to an mm object.
  353. */
  354. int
  355. drm_gem_close_ioctl(struct drm_device *dev, void *data,
  356. struct drm_file *file_priv)
  357. {
  358. struct drm_gem_close *args = data;
  359. int ret;
  360. if (!(dev->driver->driver_features & DRIVER_GEM))
  361. return -ENODEV;
  362. ret = drm_gem_handle_delete(file_priv, args->handle);
  363. return ret;
  364. }
  365. /**
  366. * Create a global name for an object, returning the name.
  367. *
  368. * Note that the name does not hold a reference; when the object
  369. * is freed, the name goes away.
  370. */
  371. int
  372. drm_gem_flink_ioctl(struct drm_device *dev, void *data,
  373. struct drm_file *file_priv)
  374. {
  375. struct drm_gem_flink *args = data;
  376. struct drm_gem_object *obj;
  377. int ret;
  378. if (!(dev->driver->driver_features & DRIVER_GEM))
  379. return -ENODEV;
  380. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  381. if (obj == NULL)
  382. return -ENOENT;
  383. idr_preload(GFP_KERNEL);
  384. spin_lock(&dev->object_name_lock);
  385. if (!obj->name) {
  386. ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
  387. obj->name = ret;
  388. args->name = (uint64_t) obj->name;
  389. spin_unlock(&dev->object_name_lock);
  390. idr_preload_end();
  391. if (ret < 0)
  392. goto err;
  393. ret = 0;
  394. /* Allocate a reference for the name table. */
  395. drm_gem_object_reference(obj);
  396. } else {
  397. args->name = (uint64_t) obj->name;
  398. spin_unlock(&dev->object_name_lock);
  399. idr_preload_end();
  400. ret = 0;
  401. }
  402. err:
  403. drm_gem_object_unreference_unlocked(obj);
  404. return ret;
  405. }
  406. /**
  407. * Open an object using the global name, returning a handle and the size.
  408. *
  409. * This handle (of course) holds a reference to the object, so the object
  410. * will not go away until the handle is deleted.
  411. */
  412. int
  413. drm_gem_open_ioctl(struct drm_device *dev, void *data,
  414. struct drm_file *file_priv)
  415. {
  416. struct drm_gem_open *args = data;
  417. struct drm_gem_object *obj;
  418. int ret;
  419. u32 handle;
  420. if (!(dev->driver->driver_features & DRIVER_GEM))
  421. return -ENODEV;
  422. spin_lock(&dev->object_name_lock);
  423. obj = idr_find(&dev->object_name_idr, (int) args->name);
  424. if (obj)
  425. drm_gem_object_reference(obj);
  426. spin_unlock(&dev->object_name_lock);
  427. if (!obj)
  428. return -ENOENT;
  429. ret = drm_gem_handle_create(file_priv, obj, &handle);
  430. drm_gem_object_unreference_unlocked(obj);
  431. if (ret)
  432. return ret;
  433. args->handle = handle;
  434. args->size = obj->size;
  435. return 0;
  436. }
/**
 * drm_gem_open - per-file GEM setup
 * @dev: DRM device being opened (unused here)
 * @file_private: new DRM file
 *
 * Called at device open time, sets up the structure for handling
 * refcounting of mm objects: the handle -> object idr and the spinlock
 * that guards it.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}
/**
 * drm_gem_object_release_handle - idr_for_each() callback used at file close
 * @id: the userspace handle (unused)
 * @ptr: the drm_gem_object the handle maps to
 * @data: the drm_file being torn down
 *
 * Releases one handle reference, mirroring drm_gem_handle_delete() minus
 * the idr bookkeeping (the whole idr is destroyed by the caller).
 * Always returns 0 so iteration visits every handle.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	/* May drop the last reference and free the object. */
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
/**
 * drm_gem_release - per-file GEM teardown
 * @dev: DRM device (unused here)
 * @file_private: DRM file going away
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining handle references held by this filp, then
 * destroys the handle idr itself.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
/*
 * drm_gem_object_release - release the object's shmem backing file, if any
 * (objects set up via drm_gem_private_object_init() have a NULL filp).
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
  482. /**
  483. * Called after the last reference to the object has been lost.
  484. * Must be called holding struct_ mutex
  485. *
  486. * Frees the object
  487. */
  488. void
  489. drm_gem_object_free(struct kref *kref)
  490. {
  491. struct drm_gem_object *obj = (struct drm_gem_object *) kref;
  492. struct drm_device *dev = obj->dev;
  493. BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  494. if (dev->driver->gem_free_object != NULL)
  495. dev->driver->gem_free_object(obj);
  496. }
  497. EXPORT_SYMBOL(drm_gem_object_free);
/* kref release callback that must never fire: used by
 * drm_gem_object_handle_free() when dropping the name table's reference,
 * which cannot be the last one because the caller's handle holds another. */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
/**
 * drm_gem_object_handle_free - called after the last handle to the object
 * has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
  529. void drm_gem_vm_open(struct vm_area_struct *vma)
  530. {
  531. struct drm_gem_object *obj = vma->vm_private_data;
  532. drm_gem_object_reference(obj);
  533. mutex_lock(&obj->dev->struct_mutex);
  534. drm_vm_open_locked(obj->dev, vma);
  535. mutex_unlock(&obj->dev->struct_mutex);
  536. }
  537. EXPORT_SYMBOL(drm_gem_vm_open);
  538. void drm_gem_vm_close(struct vm_area_struct *vma)
  539. {
  540. struct drm_gem_object *obj = vma->vm_private_data;
  541. struct drm_device *dev = obj->dev;
  542. mutex_lock(&dev->struct_mutex);
  543. drm_vm_close_locked(obj->dev, vma);
  544. drm_gem_object_unreference(obj);
  545. mutex_unlock(&dev->struct_mutex);
  546. }
  547. EXPORT_SYMBOL(drm_gem_vm_close);
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 *
 * Returns 0 on success, a negative errno, or whatever drm_mmap() returns
 * when the offset is not a GEM fake offset.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	/* Not one of our fake offsets? Fall back to the legacy map path. */
	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Faults are serviced by the driver's gem_vm_ops fault handler. */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);