/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

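/**
 * Compute the page protection bits for an I/O mapping.
 *
 * \param map_type type of the map (_DRM_REGISTERS, _DRM_FRAME_BUFFER, ...).
 * \param vma virtual memory area the protection applies to.
 * \return the adjusted pgprot_t.
 *
 * Starts from the protection implied by the VMA flags and applies the
 * per-architecture caching policy: uncached (PCD set, PWT clear) on x86
 * for everything but AGP, no-cache (plus guarded for registers) on
 * PowerPC, and write-combining or non-cached on ia64 depending on what
 * EFI reports for the range.
 */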
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_fault_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                unsigned long offset = (unsigned long)vmf->virtual_address -
                        vma->vm_start;
                unsigned long baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                /* The list cursor is never NULL after the loop; on a miss it
                 * points at the list head itself, so test for that. */
                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                     baddr, __va(agpmem->memory->memory[offset]), offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_map *map = (struct drm_map *) vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                }
        }

        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist, if we are not, then
                 * we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_map *map = (struct drm_map *) vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}

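/*
 * vm_operations open callback. The mmap paths below call
 * drm_vm_open_locked() directly since drm_mmap() already holds
 * dev->struct_mutex; this wrapper takes the mutex itself.
 */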
static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->head->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

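/**
 * Default get_map_ofs() implementation used by most drivers: the map
 * offset needs no per-device translation, so return it unchanged.
 */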
unsigned long drm_core_get_map_ofs(struct drm_map * map)
{
        return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

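/**
 * Default get_reg_ofs() implementation. On Alpha the CPU reaches device
 * registers through the hose's dense memory space, so the register
 * offset must be rebased; everywhere else no adjustment is needed.
 */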
unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_map *map = NULL;
        unsigned long offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to the bus DMA
                         * address from the CPU, so for memory of type
                         * _DRM_AGP we'll deal with sorting out the real
                         * physical pages and mappings in fault()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = dev->driver->get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, map->offset + offset);
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid fault */
                if (remap_pfn_range(vma, vma->vm_start,
                                    page_to_pfn(virt_to_page(map->handle)),
                                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

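/**
 * mmap entry point exported to drivers. Takes dev->struct_mutex and
 * defers to drm_mmap_locked() above.
 */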
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->head->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

EXPORT_SYMBOL(drm_mmap);
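
/*
 * Usage sketch (not part of this file): a driver of this era exposes
 * drm_mmap() through the file_operations embedded in its drm_driver,
 * and typically reuses the default offset helpers above. The driver
 * name below is a placeholder; the hooks shown are illustrative.
 *
 *      static struct drm_driver example_driver = {
 *              ...
 *              .get_map_ofs = drm_core_get_map_ofs,
 *              .get_reg_ofs = drm_core_get_reg_ofs,
 *              .fops = {
 *                      .owner = THIS_MODULE,
 *                      .open = drm_open,
 *                      .release = drm_release,
 *                      .ioctl = drm_ioctl,
 *                      .mmap = drm_mmap,
 *                      .poll = drm_poll,
 *              },
 *      };
 */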