radeon_gart.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space. System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective. A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP. AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
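
/*
 * Rough usage sketch (illustrative only; the exact sequence lives in the
 * asic-specific gart enable/disable paths, not in this file):
 *
 *   radeon_gart_init(rdev);               - dummy page + driver bookkeeping
 *   radeon_gart_table_vram_alloc(rdev);   - or _ram_alloc() on older asics
 *   radeon_gart_table_vram_pin(rdev);     - kmap the table, get table_addr
 *   <program the asic GART registers with rdev->gart.table_addr>
 *   rdev->gart.ready = true;
 *   radeon_gart_bind(rdev, offset, pages, pagelist, dma_addr);
 */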

/*
 * Common GART table functions.
 */
/**
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
 * gart table to be in system memory.
 * Returns 0 for success, -ENOMEM for failure.
 */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
	void *ptr;

	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
				   &rdev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
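	/*
	 * The GART hardware on these IGP chips appears to fetch the table
	 * straight from system memory without snooping the CPU caches, so
	 * map the table pages uncached to keep CPU writes visible to it.
	 */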
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	rdev->gart.ptr = ptr;
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
	return 0;
}

/**
 * radeon_gart_table_ram_free - free system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Free system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
 * gart table to be in system memory.
 */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
	if (rdev->gart.ptr == NULL) {
		return;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_wb((unsigned long)rdev->gart.ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
			    (void *)rdev->gart.ptr,
			    rdev->gart.table_addr);
	rdev->gart.ptr = NULL;
	rdev->gart.table_addr = 0;
}

/**
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		r = radeon_bo_create(rdev, rdev->gart.table_size,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->gart.robj);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * radeon_gart_table_vram_pin - pin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->gart.robj,
			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;
	return r;
}

/**
 * radeon_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);
		radeon_bo_unpin(rdev->gart.robj);
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;
	}
}

/**
 * radeon_gart_table_vram_free - free gart page table vram
 *
 * @rdev: radeon_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+). These asics require the gart table to
 * be in video memory.
 */
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
	if (rdev->gart.robj == NULL) {
		return;
	}
	radeon_bo_unref(&rdev->gart.robj);
}

/*
 * Common gart functions.
 */
/**
 * radeon_gart_unbind - unbind pages from the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}
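	/*
	 * t indexes GPU-sized pages in the gart aperture, p indexes the
	 * corresponding CPU pages; one CPU page covers
	 * PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive GPU pages.
	 */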
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			rdev->gart.pages[p] = NULL;
			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t, page_base);
				}
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

/**
 * radeon_gart_bind - bind pages into the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
	unsigned t;
	unsigned p;
	uint64_t page_base;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages_addr[p] = dma_addr[i];
		rdev->gart.pages[p] = pagelist[i];
		if (rdev->gart.ptr) {
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				radeon_gart_set_page(rdev, t, page_base);
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
	return 0;
}

/**
 * radeon_gart_restore - bind all pages in the gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Binds all pages in the gart page table (all asics).
 * Used to rebuild the gart table on device startup or resume.
 */
void radeon_gart_restore(struct radeon_device *rdev)
{
	int i, j, t;
	u64 page_base;

	if (!rdev->gart.ptr) {
		return;
	}
	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
		page_base = rdev->gart.pages_addr[i];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			radeon_gart_set_page(rdev, t, page_base);
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

/**
 * radeon_gart_init - init the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.pages) {
		return 0;
	}
	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* Compute table size */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
	/* Allocate pages table */
	rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
					rdev->gart.num_cpu_pages);
	if (rdev->gart.pages_addr == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* set GART entry to point to the dummy page by default */
	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
	}
	return 0;
}

/**
 * radeon_gart_fini - tear down the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void radeon_gart_fini(struct radeon_device *rdev)
{
	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
		/* unbind pages */
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
	}
	rdev->gart.ready = false;
	vfree(rdev->gart.pages);
	vfree(rdev->gart.pages_addr);
	rdev->gart.pages = NULL;
	rdev->gart.pages_addr = NULL;

	radeon_dummy_page_fini(rdev);
}

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
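
/*
 * Rough per-submission flow, as sketched from the helpers below (the actual
 * call sites live in the CS and asic code, not in this file):
 *
 *   radeon_vm_alloc_pt(rdev, vm);        - make sure the page directory exists
 *   radeon_vm_grab_id(rdev, vm, ring);   - pick a free/least recently used VMID
 *   radeon_vm_bo_update_pte(rdev, vm, bo, mem); - fill PTEs for each mapped bo
 *   radeon_vm_fence(rdev, vm, fence);    - remember the fence protecting id/tables
 *   radeon_vm_add_to_lru(rdev, vm);      - keep the VM from being evicted too soon
 */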

/*
 * vm helpers
 *
 * TODO bind a default page at vm initialization for default address
 */
/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
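
/*
 * Layout implied by the two helpers above: each page directory entry is
 * 8 bytes and covers 2^RADEON_VM_BLOCK_SIZE GPU pages, so a VM spanning
 * max_pfn pages needs max_pfn >> RADEON_VM_BLOCK_SIZE PDEs, and the
 * directory occupies that many 8-byte entries rounded up to a GPU page.
 * For example, a 1 GB address space (262144 4 KB pages) with a block
 * size of 9 would need 512 PDEs, i.e. a 4 KB page directory.
 */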

/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	struct radeon_vm *vm;
	struct radeon_bo_va *bo_va;
	int r;
	unsigned size;

	if (!rdev->vm_manager.enabled) {
		/* allocate enough for 2 full VM pts */
		size = radeon_vm_directory_size(rdev);
		size += rdev->vm_manager.max_pfn * 8;
		size *= 2;
		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
					      RADEON_GPU_PAGE_ALIGN(size),
					      RADEON_VM_PTB_ALIGN_SIZE,
					      RADEON_GEM_DOMAIN_VRAM);
		if (r) {
			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
				(rdev->vm_manager.max_pfn * 8) >> 10);
			return r;
		}

		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;

		r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
		if (r)
			return r;
	}

	/* restore page table */
	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
		if (vm->page_directory == NULL)
			continue;

		list_for_each_entry(bo_va, &vm->va, vm_list) {
			bo_va->valid = false;
		}
	}
	return 0;
}

/**
 * radeon_vm_free_pt - free the page table for a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm to unbind
 *
 * Free the page table of a specific vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_free_pt(struct radeon_device *rdev,
			      struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;
	int i;

	if (!vm->page_directory)
		return;

	list_del_init(&vm->list);
	radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);

	list_for_each_entry(bo_va, &vm->va, vm_list) {
		bo_va->valid = false;
	}

	if (vm->page_tables == NULL)
		return;

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);

	kfree(vm->page_tables);
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	struct radeon_vm *vm, *tmp;
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	mutex_lock(&rdev->vm_manager.lock);
	/* free all allocated page tables */
	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
		mutex_lock(&vm->mutex);
		radeon_vm_free_pt(rdev, vm);
		mutex_unlock(&vm->mutex);
	}
	for (i = 0; i < RADEON_NUM_VM; ++i) {
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	}
	radeon_asic_vm_fini(rdev);
	mutex_unlock(&rdev->vm_manager.lock);

	radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
	rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_evict - evict page table to make room for new one
 *
 * @rdev: radeon_device pointer
 * @vm: VM we want to allocate something for
 *
 * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
 * Returns 0 for success, -ENOMEM for failure.
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_vm *vm_evict;

	if (list_empty(&rdev->vm_manager.lru_vm))
		return -ENOMEM;

	vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
				    struct radeon_vm, list);
	if (vm_evict == vm)
		return -ENOMEM;

	mutex_lock(&vm_evict->mutex);
	radeon_vm_free_pt(rdev, vm_evict);
	mutex_unlock(&vm_evict->mutex);
	return 0;
}

/**
 * radeon_vm_alloc_pt - allocates a page table for a VM
 *
 * @rdev: radeon_device pointer
 * @vm: vm to bind
 *
 * Allocate a page table for the requested vm (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
	unsigned pd_size, pd_entries, pts_size;
	struct radeon_ib ib;
	int r;

	if (vm == NULL) {
		return -EINVAL;
	}

	if (vm->page_directory != NULL) {
		return 0;
	}

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);
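
	/*
	 * The page directory is sub-allocated from the vm_manager's SA pool;
	 * if the pool is full, evict the least recently used VM's tables and
	 * try again until either the allocation succeeds or nothing is left
	 * to evict.
	 */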
retry:
	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
			     &vm->page_directory, pd_size,
			     RADEON_VM_PTB_ALIGN_SIZE, false);
	if (r == -ENOMEM) {
		r = radeon_vm_evict(rdev, vm);
		if (r)
			return r;
		goto retry;
	} else if (r) {
		return r;
	}

	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);

	/* Initially clear the page directory */
	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
			  NULL, pd_entries * 2 + 64);
	if (r) {
		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
		return r;
	}

	ib.length_dw = 0;

	radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
				0, pd_entries, 0, 0);

	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	/* allocate page table array */
	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);

	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
		return -ENOMEM;
	}

	return 0;
}

/**
 * radeon_vm_add_to_lru - add VMs page table to LRU list
 *
 * @rdev: radeon_device pointer
 * @vm: vm to add to LRU
 *
 * Add the allocated page table to the LRU list (cayman+).
 *
 * Global mutex must be locked!
 */
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
{
	list_del_init(&vm->list);
	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
		return NULL;

	/* we definitely need to flush */
	radeon_fence_unref(&vm->last_flush);
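
	/*
	 * No free id available: reuse the VMID whose last use completed the
	 * earliest. choices[0] tracks a candidate last used on the target
	 * ring, choices[1] one last used elsewhere; the same-ring candidate
	 * is tried first, presumably because syncing to a fence on the same
	 * ring is cheaper than a cross-ring semaphore wait.
	 */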
	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm->id = i;
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm->id = choices[i];
			trace_radeon_vm_grab_id(vm->id, ring);
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->soffset = 0;
	bo_va->eoffset = 0;
	bo_va->flags = 0;
	bo_va->valid = false;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_list);

	mutex_lock(&vm->mutex);
	list_add(&bo_va->vm_list, &vm->va);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	uint64_t eoffset, last_offset = 0;
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_bo_va *tmp;
	struct list_head *head;
	unsigned last_pfn;

	if (soffset) {
		/* make sure object fit at this offset */
		eoffset = soffset + size;
		if (soffset >= eoffset) {
			return -EINVAL;
		}

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn > rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			return -EINVAL;
		}

	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	head = &vm->va;
	last_offset = 0;
	list_for_each_entry(tmp, &vm->va, vm_list) {
		if (bo_va == tmp) {
			/* skip over currently modified bo */
			continue;
		}

		if (soffset >= last_offset && eoffset <= tmp->soffset) {
			/* bo can be added before this one */
			break;
		}
		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
		last_offset = tmp->eoffset;
		head = &tmp->vm_list;
	}

	bo_va->soffset = soffset;
	bo_va->eoffset = eoffset;
	bo_va->flags = flags;
	bo_va->valid = false;
	list_move(&bo_va->vm_list, head);

	mutex_unlock(&vm->mutex);
	return 0;
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size*/
	result |= addr & (~PAGE_MASK);

	return result;
}
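
/*
 * Note: for RADEON_VM_PAGE_SYSTEM mappings the address handed to the asic
 * vm_set_page() backends is a gart offset (the bo lives in the TTM GTT
 * domain); those backends are expected to call radeon_vm_map_gart() to
 * translate it into the real bus address before writing each PTE.
 */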

/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;

	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}

/**
 * radeon_vm_update_pdes - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_update_pdes(struct radeon_device *rdev,
				 struct radeon_vm *vm,
				 struct radeon_ib *ib,
				 uint64_t start, uint64_t end)
{
	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;

	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0;
	uint64_t pt_idx;
	int r;

	start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
	end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
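
	/*
	 * Coalesce directory updates: consecutive PDEs that point to
	 * physically contiguous page tables are written with a single
	 * vm_set_page() call, and each batch of freshly allocated page
	 * tables is cleared right after its PDEs are written.
	 */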
	/* walk over the address space and update the page directory */
	for (pt_idx = start; pt_idx <= end; ++pt_idx) {
		uint64_t pde, pt;

		if (vm->page_tables[pt_idx])
			continue;

retry:
		r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
				     &vm->page_tables[pt_idx],
				     RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, false);
		if (r == -ENOMEM) {
			r = radeon_vm_evict(rdev, vm);
			if (r)
				return r;
			goto retry;
		} else if (r) {
			return r;
		}

		pde = vm->pd_gpu_addr + pt_idx * 8;

		pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);

		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				radeon_asic_vm_set_page(rdev, ib, last_pde,
							last_pt, count, incr,
							R600_PTE_VALID);

				count *= RADEON_VM_PTE_COUNT;
				radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
							count, 0, 0);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count) {
		radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
					incr, R600_PTE_VALID);

		count *= RADEON_VM_PTE_COUNT;
		radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
					count, 0, 0);
	}

	return 0;
}

/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
				  struct radeon_vm *vm,
				  struct radeon_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;

	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	start = start / RADEON_GPU_PAGE_SIZE;
	end = end / RADEON_GPU_PAGE_SIZE;
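
	/*
	 * PTE updates are batched the same way as the directory updates
	 * above: runs of contiguous PTE slots are merged into a single
	 * vm_set_page() call, with the run split whenever a page table
	 * boundary (RADEON_VM_PTE_COUNT entries) is crossed.
	 */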
	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
		unsigned nptes;
		uint64_t pte;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				radeon_asic_vm_set_page(rdev, ib, last_pte,
							last_dst, count,
							RADEON_GPU_PAGE_SIZE,
							flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_asic_vm_set_page(rdev, ib, last_pte,
					last_dst, count,
					RADEON_GPU_PAGE_SIZE, flags);
	}
}

/**
 * radeon_vm_bo_update_pte - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved & global and local mutex must be locked!
 */
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_vm *vm,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem)
{
	struct radeon_ib ib;
	struct radeon_bo_va *bo_va;
	unsigned nptes, npdes, ndw;
	uint64_t addr;
	int r;

	/* nothing to do if vm isn't bound */
	if (vm->page_directory == NULL)
		return 0;

	bo_va = radeon_vm_bo_find(vm, bo);
	if (bo_va == NULL) {
		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
		return -EINVAL;
	}

	if (!bo_va->soffset) {
		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
			bo, vm);
		return -EINVAL;
	}

	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
		return 0;

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
			bo_va->valid = true;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
		bo_va->valid = false;
	}

	nptes = radeon_bo_ngpu_pages(bo);

	/* assume two extra pdes in case the mapping overlaps the borders */
	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;

	/* padding, etc. */
	ndw = 64;

	if (RADEON_VM_BLOCK_SIZE > 11)
		/* reserve space for one header for every 2k dwords */
		ndw += (nptes >> 11) * 4;
	else
		/* reserve space for one header for
		   every (1 << BLOCK_SIZE) entries */
		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;

	/* reserve space for pte addresses */
	ndw += nptes * 2;

	/* reserve space for one header for every 2k dwords */
	ndw += (npdes >> 11) * 4;

	/* reserve space for pde addresses */
	ndw += npdes * 2;

	/* reserve space for clearing new page tables */
	ndw += npdes * 2 * RADEON_VM_PTE_COUNT;
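
	/*
	 * Worked example (assuming a block size of 9, i.e. 512 PTEs per
	 * page table): a 1 MB bo has nptes = 256 and npdes = 2, so
	 * ndw = 64 + 0 + 512 + 0 + 4 + 2048 = 2628 dwords, dominated by
	 * the space reserved for clearing freshly allocated page tables.
	 */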
	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}

	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
			      addr, radeon_vm_page_flags(bo_va->flags));

	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
 * remove the ptes for @bo_va in the page table.
 * Returns 0 for success.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_bo_va *bo_va)
{
	int r = 0;

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&bo_va->vm->mutex);
	if (bo_va->soffset) {
		r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
	}
	mutex_unlock(&rdev->vm_manager.lock);
	list_del(&bo_va->vm_list);
	mutex_unlock(&bo_va->vm->mutex);
	list_del(&bo_va->bo_list);

	kfree(bo_va);
	return r;
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid in all vms (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
	}
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	vm->id = 0;
	vm->fence = NULL;
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->list);
	INIT_LIST_HEAD(&vm->va);
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int r;

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);
	radeon_vm_free_pt(rdev, vm);
	mutex_unlock(&rdev->vm_manager.lock);

	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			kfree(bo_va);
		}
	}
	radeon_fence_unref(&vm->fence);
	radeon_fence_unref(&vm->last_flush);
	mutex_unlock(&vm->mutex);
}