/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif
	return tmp;
}
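/*
 * Note on the two helpers above: drm_io_prot() is applied by
 * drm_mmap_locked() to device-memory style mappings (_DRM_REGISTERS,
 * _DRM_FRAME_BUFFER, and the AGP aperture path), which must not be cached
 * normally; drm_dma_prot() is applied to _DRM_CONSISTENT and
 * _DRM_SCATTER_GATHER mappings, which are ordinary RAM except on
 * non-cache-coherent PowerPC.
 */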
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on error.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		/* list_for_each_entry() never leaves the cursor NULL; check
		 * whether the loop ran off the end of the list instead. */
		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on error.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			case _DRM_GEM:
				DRM_ERROR("tried to rmmap GEM object\n");
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on error.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}
/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on error.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
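/*
 * Summary of how drm_mmap_locked() (below) dispatches the tables above by
 * map type:
 *
 *   DMA buffers (vm_pgoff == 0)          -> drm_vm_dma_ops (drm_do_vm_dma_fault)
 *   _DRM_AGP with cant_use_aperture      -> drm_vm_ops     (drm_do_vm_fault)
 *   _DRM_FRAME_BUFFER / _DRM_REGISTERS   -> drm_vm_ops     (pages premapped via io_remap_pfn_range)
 *   _DRM_SHM / _DRM_CONSISTENT           -> drm_vm_shm_ops (drm_do_vm_shm_fault)
 *   _DRM_SCATTER_GATHER                  -> drm_vm_sg_ops  (drm_do_vm_sg_fault)
 */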
/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}
resource_size_t drm_core_get_map_ofs(struct drm_local_map *map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);
/**
 * mmap a DRM memory map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}
	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

EXPORT_SYMBOL(drm_mmap);
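/*
 * Usage sketch (illustrative only, not part of this file): a driver built on
 * this DRM core typically wires drm_mmap() into its file_operations; user
 * space then calls mmap() on the DRM file descriptor with a map's offset
 * token, and drm_mmap_locked() resolves that token through dev->map_hash.
 * The structure name below is hypothetical:
 *
 *	static const struct file_operations example_drm_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = drm_open,
 *		.release = drm_release,
 *		.mmap    = drm_mmap,
 *	};
 */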