/* drm_gem.c — DRM GEM (Graphics Execution Manager) core, ~19 KB */
  1. /*
  2. * Copyright © 2008 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eric Anholt <eric@anholt.net>
  25. *
  26. */
  27. #include <linux/types.h>
  28. #include <linux/slab.h>
  29. #include <linux/mm.h>
  30. #include <linux/uaccess.h>
  31. #include <linux/fs.h>
  32. #include <linux/file.h>
  33. #include <linux/module.h>
  34. #include <linux/mman.h>
  35. #include <linux/pagemap.h>
  36. #include <linux/shmem_fs.h>
  37. #include <linux/dma-buf.h>
  38. #include <drm/drmP.h>
  39. /** @file drm_gem.c
  40. *
  41. * This file provides some of the base ioctls and library routines for
  42. * the graphics memory manager implemented by each device driver.
  43. *
  44. * Because various devices have different requirements in terms of
  45. * synchronization and migration strategies, implementing that is left up to
  46. * the driver, and all that the general API provides should be generic --
  47. * allocating objects, reading/writing data with the cpu, freeing objects.
  48. * Even there, platform-dependent optimizations for reading/writing data with
  49. * the CPU mean we'll likely hook those out to driver-specific calls. However,
  50. * the DRI2 implementation wants to have at least allocate/mmap be generic.
  51. *
  52. * The goal was to have swap-backed object allocation managed through
  53. * struct file. However, file descriptors as handles to a struct file have
  54. * two major failings:
  55. * - Process limits prevent more than 1024 or so being used at a time by
  56. * default.
  57. * - Inability to allocate high fds will aggravate the X Server's select()
  58. * handling, and likely that of many GL client applications as well.
  59. *
  60. * This led to a plan of using our own integer IDs (called handles, following
  61. * DRM terminology) to mimic fds, and implement the fd syscalls we need as
  62. * ioctls. The objects themselves will still include the struct file so
  63. * that we can transition to fds if the required kernel infrastructure shows
  64. * up at a later date, and as our interface with shmfs for memory allocation.
  65. */
  66. /*
  67. * We make up offsets for buffer objects so we can recognize them at
  68. * mmap time.
  69. */
  70. /* pgoff in mmap is an unsigned long, so we need to make sure that
  71. * the faked up offset will fit
  72. */
  73. #if BITS_PER_LONG == 64
  74. #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
  75. #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
  76. #else
  77. #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
  78. #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
  79. #endif
  80. /**
  81. * Initialize the GEM device fields
  82. */
  83. int
  84. drm_gem_init(struct drm_device *dev)
  85. {
  86. struct drm_gem_mm *mm;
  87. spin_lock_init(&dev->object_name_lock);
  88. idr_init(&dev->object_name_idr);
  89. mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
  90. if (!mm) {
  91. DRM_ERROR("out of memory\n");
  92. return -ENOMEM;
  93. }
  94. dev->mm_private = mm;
  95. if (drm_ht_create(&mm->offset_hash, 12)) {
  96. kfree(mm);
  97. return -ENOMEM;
  98. }
  99. if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
  100. DRM_FILE_PAGE_OFFSET_SIZE)) {
  101. drm_ht_remove(&mm->offset_hash);
  102. kfree(mm);
  103. return -ENOMEM;
  104. }
  105. return 0;
  106. }
  107. void
  108. drm_gem_destroy(struct drm_device *dev)
  109. {
  110. struct drm_gem_mm *mm = dev->mm_private;
  111. drm_mm_takedown(&mm->offset_manager);
  112. drm_ht_remove(&mm->offset_hash);
  113. kfree(mm);
  114. dev->mm_private = NULL;
  115. }
  116. /**
  117. * Initialize an already allocated GEM object of the specified size with
  118. * shmfs backing store.
  119. */
  120. int drm_gem_object_init(struct drm_device *dev,
  121. struct drm_gem_object *obj, size_t size)
  122. {
  123. BUG_ON((size & (PAGE_SIZE - 1)) != 0);
  124. obj->dev = dev;
  125. obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
  126. if (IS_ERR(obj->filp))
  127. return PTR_ERR(obj->filp);
  128. kref_init(&obj->refcount);
  129. atomic_set(&obj->handle_count, 0);
  130. obj->size = size;
  131. return 0;
  132. }
  133. EXPORT_SYMBOL(drm_gem_object_init);
  134. /**
  135. * Initialize an already allocated GEM object of the specified size with
  136. * no GEM provided backing store. Instead the caller is responsible for
  137. * backing the object and handling it.
  138. */
  139. int drm_gem_private_object_init(struct drm_device *dev,
  140. struct drm_gem_object *obj, size_t size)
  141. {
  142. BUG_ON((size & (PAGE_SIZE - 1)) != 0);
  143. obj->dev = dev;
  144. obj->filp = NULL;
  145. kref_init(&obj->refcount);
  146. atomic_set(&obj->handle_count, 0);
  147. obj->size = size;
  148. return 0;
  149. }
  150. EXPORT_SYMBOL(drm_gem_private_object_init);
  151. /**
  152. * Allocate a GEM object of the specified size with shmfs backing store
  153. */
  154. struct drm_gem_object *
  155. drm_gem_object_alloc(struct drm_device *dev, size_t size)
  156. {
  157. struct drm_gem_object *obj;
  158. obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  159. if (!obj)
  160. goto free;
  161. if (drm_gem_object_init(dev, obj, size) != 0)
  162. goto free;
  163. if (dev->driver->gem_init_object != NULL &&
  164. dev->driver->gem_init_object(obj) != 0) {
  165. goto fput;
  166. }
  167. return obj;
  168. fput:
  169. /* Object_init mangles the global counters - readjust them. */
  170. fput(obj->filp);
  171. free:
  172. kfree(obj);
  173. return NULL;
  174. }
  175. EXPORT_SYMBOL(drm_gem_object_alloc);
  176. static void
  177. drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
  178. {
  179. if (obj->import_attach) {
  180. drm_prime_remove_imported_buf_handle(&filp->prime,
  181. obj->import_attach->dmabuf);
  182. }
  183. if (obj->export_dma_buf) {
  184. drm_prime_remove_imported_buf_handle(&filp->prime,
  185. obj->export_dma_buf);
  186. }
  187. }
/**
 * drm_gem_handle_delete - remove the handle -> object mapping for this file
 * @filp: DRM file private that owns the handle
 * @handle: userspace handle to drop
 *
 * Removes the mapping from handle to filp for this object, drops the
 * PRIME bookkeeping, invokes the driver's gem_close_object() hook, and
 * releases the handle reference.
 *
 * Returns 0 on success or -EINVAL if the handle is not known to @filp.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;
	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);
	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	/* Cache dev before the handle (and possibly the object) goes away. */
	dev = obj->dev;
	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);
	/* Driver close hook and PRIME cleanup run outside table_lock. */
	drm_gem_remove_prime_handles(obj, filp);
	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	/* Drop the handle reference; may free the object. */
	drm_gem_object_handle_unreference_unlocked(obj);
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
 * drm_gem_handle_create - allocate a userspace handle for an object
 * @file_priv: DRM file private the handle belongs to
 * @obj: object to create a handle for
 * @handlep: out parameter receiving the new handle
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 *
 * Returns 0 on success or a negative errno from the idr or the
 * driver's gem_open_object() hook.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;
	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;
	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	/* -EAGAIN means the preallocated idr layer was consumed by a
	 * racing allocation: refill and retry. */
	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		return ret;
	drm_gem_object_handle_reference(obj);
	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			/* Undo the handle we just created; this also drops
			 * the handle reference taken above. */
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
  261. /**
  262. * drm_gem_free_mmap_offset - release a fake mmap offset for an object
  263. * @obj: obj in question
  264. *
  265. * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
  266. */
  267. void
  268. drm_gem_free_mmap_offset(struct drm_gem_object *obj)
  269. {
  270. struct drm_device *dev = obj->dev;
  271. struct drm_gem_mm *mm = dev->mm_private;
  272. struct drm_map_list *list = &obj->map_list;
  273. drm_ht_remove_item(&mm->offset_hash, &list->hash);
  274. drm_mm_put_block(list->file_offset_node);
  275. kfree(list->map);
  276. list->map = NULL;
  277. }
  278. EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENOSPC when
 * the fake-offset address space is exhausted.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;
	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	/* NOTE(review): allocates sizeof(struct drm_map_list) for a
	 * struct drm_local_map pointer — looks like an over-allocation;
	 * confirm against the drm header definitions. */
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;
	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;
	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
			obj->size / PAGE_SIZE, 0, false);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}
	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
			obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}
	/* The block's start page is the key userspace hands back as the
	 * fake mmap offset (in pages). */
	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}
	return 0;
out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;
	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
  336. /** Returns a reference to the object named by the handle. */
  337. struct drm_gem_object *
  338. drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
  339. u32 handle)
  340. {
  341. struct drm_gem_object *obj;
  342. spin_lock(&filp->table_lock);
  343. /* Check if we currently have a reference on the object */
  344. obj = idr_find(&filp->object_idr, handle);
  345. if (obj == NULL) {
  346. spin_unlock(&filp->table_lock);
  347. return NULL;
  348. }
  349. drm_gem_object_reference(obj);
  350. spin_unlock(&filp->table_lock);
  351. return obj;
  352. }
  353. EXPORT_SYMBOL(drm_gem_object_lookup);
  354. /**
  355. * Releases the handle to an mm object.
  356. */
  357. int
  358. drm_gem_close_ioctl(struct drm_device *dev, void *data,
  359. struct drm_file *file_priv)
  360. {
  361. struct drm_gem_close *args = data;
  362. int ret;
  363. if (!(dev->driver->driver_features & DRIVER_GEM))
  364. return -ENODEV;
  365. ret = drm_gem_handle_delete(file_priv, args->handle);
  366. return ret;
  367. }
/**
 * drm_gem_flink_ioctl - create a global name for an object
 * @dev: DRM device
 * @data: ioctl payload (struct drm_gem_flink)
 * @file_priv: DRM file private of the caller
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 *
 * Returns 0 on success or a negative errno.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
again:
	/* Preload the name idr outside the spinlock; -EAGAIN below means a
	 * racing allocation consumed the preload and we must retry. */
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}
	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		/* NOTE(review): args->name is assigned before ret is checked;
		 * on failure it carries a stale value. That is only harmless
		 * if ioctl output is not copied back on error — verify. */
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		if (ret == -EAGAIN)
			goto again;
		else if (ret)
			goto err;
		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	} else {
		/* Already flinked: just report the existing name. */
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}
err:
	/* Drop the lookup reference taken above. */
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
  412. /**
  413. * Open an object using the global name, returning a handle and the size.
  414. *
  415. * This handle (of course) holds a reference to the object, so the object
  416. * will not go away until the handle is deleted.
  417. */
  418. int
  419. drm_gem_open_ioctl(struct drm_device *dev, void *data,
  420. struct drm_file *file_priv)
  421. {
  422. struct drm_gem_open *args = data;
  423. struct drm_gem_object *obj;
  424. int ret;
  425. u32 handle;
  426. if (!(dev->driver->driver_features & DRIVER_GEM))
  427. return -ENODEV;
  428. spin_lock(&dev->object_name_lock);
  429. obj = idr_find(&dev->object_name_idr, (int) args->name);
  430. if (obj)
  431. drm_gem_object_reference(obj);
  432. spin_unlock(&dev->object_name_lock);
  433. if (!obj)
  434. return -ENOENT;
  435. ret = drm_gem_handle_create(file_priv, obj, &handle);
  436. drm_gem_object_unreference_unlocked(obj);
  437. if (ret)
  438. return ret;
  439. args->handle = handle;
  440. args->size = obj->size;
  441. return 0;
  442. }
  443. /**
  444. * Called at device open time, sets up the structure for handling refcounting
  445. * of mm objects.
  446. */
  447. void
  448. drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
  449. {
  450. idr_init(&file_private->object_idr);
  451. spin_lock_init(&file_private->table_lock);
  452. }
  453. /**
  454. * Called at device close to release the file's
  455. * handle references on objects.
  456. */
  457. static int
  458. drm_gem_object_release_handle(int id, void *ptr, void *data)
  459. {
  460. struct drm_file *file_priv = data;
  461. struct drm_gem_object *obj = ptr;
  462. struct drm_device *dev = obj->dev;
  463. drm_gem_remove_prime_handles(obj, file_priv);
  464. if (dev->driver->gem_close_object)
  465. dev->driver->gem_close_object(obj, file_priv);
  466. drm_gem_object_handle_unreference_unlocked(obj);
  467. return 0;
  468. }
  469. /**
  470. * Called at close time when the filp is going away.
  471. *
  472. * Releases any remaining references on objects by this filp.
  473. */
  474. void
  475. drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
  476. {
  477. idr_for_each(&file_private->object_idr,
  478. &drm_gem_object_release_handle, file_private);
  479. idr_remove_all(&file_private->object_idr);
  480. idr_destroy(&file_private->object_idr);
  481. }
  482. void
  483. drm_gem_object_release(struct drm_gem_object *obj)
  484. {
  485. if (obj->filp)
  486. fput(obj->filp);
  487. }
  488. EXPORT_SYMBOL(drm_gem_object_release);
  489. /**
  490. * Called after the last reference to the object has been lost.
  491. * Must be called holding struct_ mutex
  492. *
  493. * Frees the object
  494. */
  495. void
  496. drm_gem_object_free(struct kref *kref)
  497. {
  498. struct drm_gem_object *obj = (struct drm_gem_object *) kref;
  499. struct drm_device *dev = obj->dev;
  500. BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  501. if (dev->driver->gem_free_object != NULL)
  502. dev->driver->gem_free_object(obj);
  503. }
  504. EXPORT_SYMBOL(drm_gem_object_free);
/* kref release callback that must never fire: used by
 * drm_gem_object_handle_free() to assert that dropping the name-table
 * reference can never be the final put (a handle still holds one). */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
  509. /**
  510. * Called after the last handle to the object has been closed
  511. *
  512. * Removes any name for the object. Note that this must be
  513. * called before drm_gem_object_free or we'll be touching
  514. * freed memory
  515. */
  516. void drm_gem_object_handle_free(struct drm_gem_object *obj)
  517. {
  518. struct drm_device *dev = obj->dev;
  519. /* Remove any name for this object */
  520. spin_lock(&dev->object_name_lock);
  521. if (obj->name) {
  522. idr_remove(&dev->object_name_idr, obj->name);
  523. obj->name = 0;
  524. spin_unlock(&dev->object_name_lock);
  525. /*
  526. * The object name held a reference to this object, drop
  527. * that now.
  528. *
  529. * This cannot be the last reference, since the handle holds one too.
  530. */
  531. kref_put(&obj->refcount, drm_gem_object_ref_bug);
  532. } else
  533. spin_unlock(&dev->object_name_lock);
  534. }
  535. EXPORT_SYMBOL(drm_gem_object_handle_free);
  536. void drm_gem_vm_open(struct vm_area_struct *vma)
  537. {
  538. struct drm_gem_object *obj = vma->vm_private_data;
  539. drm_gem_object_reference(obj);
  540. mutex_lock(&obj->dev->struct_mutex);
  541. drm_vm_open_locked(obj->dev, vma);
  542. mutex_unlock(&obj->dev->struct_mutex);
  543. }
  544. EXPORT_SYMBOL(drm_gem_vm_open);
  545. void drm_gem_vm_close(struct vm_area_struct *vma)
  546. {
  547. struct drm_gem_object *obj = vma->vm_private_data;
  548. struct drm_device *dev = obj->dev;
  549. mutex_lock(&dev->struct_mutex);
  550. drm_vm_close_locked(obj->dev, vma);
  551. drm_gem_object_unreference(obj);
  552. mutex_unlock(&dev->struct_mutex);
  553. }
  554. EXPORT_SYMBOL(drm_gem_vm_close);
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 *
 * Returns 0 on success or a negative errno; offsets not registered as
 * GEM fake offsets fall back to the legacy drm_mmap() path.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;
	if (drm_device_is_unplugged(dev))
		return -ENODEV;
	mutex_lock(&dev->struct_mutex);
	/* Not one of our fake offsets: hand off to the legacy mapping path. */
	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}
	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	/* Restricted maps are root-only. */
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}
	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}
	obj = map->handle;
	/* Without driver vm_ops there is no fault handler to install. */
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);
	drm_vm_open_locked(dev, vma);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);