/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
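/*
 * Usage sketch: drm_io_prot() only computes page protection bits; the caller
 * still has to apply them before remapping, which is exactly what
 * drm_mmap_locked() further down does for register and frame-buffer maps
 * (pfn and size here stand for the map's page frame number and length):
 *
 *     vma->vm_page_prot = drm_io_prot(map->type, vma);
 *     io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */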
/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_nopage_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_nopage_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_nopage_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem, *found = NULL;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr) {
				found = agpmem;
				break;
			}
		}

		/*
		 * list_for_each_entry() never leaves the cursor NULL, so a
		 * "not found" result has to be tracked explicitly rather than
		 * by testing agpmem itself.
		 */
		if (!found)
			goto vm_nopage_error;
		agpmem = found;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));

		return page;
	}
      vm_nopage_error:
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	return NOPAGE_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!map)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);

	DRM_DEBUG("shm_nopage 0x%lx\n", address);
	return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		}
	}
	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not,
		 * then we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!dma->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);

	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
	return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
						   unsigned long address)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!entry->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

	return page;
}
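/*
 * Worked example for drm_do_vm_sg_nopage() above (illustrative numbers only):
 * if the faulting address is 2 pages past vma->vm_start and map->offset lies
 * 3 pages past dev->sg->virtual, then page_offset = 2 + 3 = 5 and
 * entry->pagelist[5] is the page that backs the fault.
 */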
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_sg_nopage(vma, address);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.nopage = drm_vm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.nopage = drm_vm_shm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.nopage = drm_vm_dma_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.nopage = drm_vm_sg_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
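/*
 * Reading aid (derived from the switch in drm_mmap_locked() below, not a
 * table that exists anywhere in the DRM headers): which ops table each map
 * type ends up with.
 *
 *   _DRM_AGP, _DRM_FRAME_BUFFER, _DRM_REGISTERS  -> drm_vm_ops
 *   _DRM_SHM, _DRM_CONSISTENT                    -> drm_vm_shm_ops
 *   _DRM_SCATTER_GATHER                          -> drm_vm_sg_ops
 *   vm_pgoff == 0 (DMA buffer mapping)           -> drm_vm_dma_ops,
 *                                                   via drm_mmap_dma()
 */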
/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/**
 * mmap DMA memory.
 *
 * \param filp standard file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, the
 * file pointer, and calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}
unsigned long drm_core_get_map_ofs(struct drm_map * map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);
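/*
 * These two exported helpers are normally plugged into a driver's struct
 * drm_driver so that drm_mmap_locked() below can call
 * dev->driver->get_reg_ofs(dev).  A minimal sketch (the driver name and the
 * surrounding fields are assumptions, not defined in this file):
 *
 *     static struct drm_driver example_driver = {
 *             ...
 *             .get_map_ofs = drm_core_get_map_ofs,
 *             .get_reg_ofs = drm_core_get_reg_ofs,
 *             ...
 *     };
 */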
/**
 * mmap DRM memory.
 *
 * \param filp standard file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls drm_mmap_dma(). Otherwise looks the map up in
 * drm_device::map_hash, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally sets the file pointer and calls drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_map *map = NULL;
	unsigned long offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in nopage()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid nopage */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start,
				    vma->vm_page_prot))
			return -EAGAIN;
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

EXPORT_SYMBOL(drm_mmap);
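/*
 * Userspace view, as a hedged sketch: the descriptor, map offset and size
 * below are placeholders; only the routing behaviour (vm_pgoff selects the
 * map via dev->map_hash, offset 0 means the DMA buffer mapping) comes from
 * the code above.
 *
 *     void *ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                      drm_fd, map_offset);
 *
 * drm_mmap() takes dev->struct_mutex and drm_mmap_locked() then routes the
 * request either to drm_mmap_dma() (offset 0) or to the matching drm_map.
 */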