/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
                struct page **p = drm_gem_get_pages(obj, 0);
                int npages = obj->size >> PAGE_SHIFT;

                if (IS_ERR(p)) {
                        dev_err(dev->dev, "could not get pages: %ld\n",
                                        PTR_ERR(p));
                        return p;
                }

                msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt\n");
                        return ERR_CAST(msm_obj->sgt);
                }

                msm_obj->pages = p;

                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_map_sg(dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
        }

        return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (msm_obj->pages) {
                /* For non-cached buffers, ensure the pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
                sg_free_table(msm_obj->sgt);
                kfree(msm_obj->sgt);

                drm_gem_put_pages(obj, msm_obj->pages, true, false);
                msm_obj->pages = NULL;
        }
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct page **p;

        mutex_lock(&dev->struct_mutex);
        p = get_pages(obj);
        mutex_unlock(&dev->struct_mutex);

        return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
        /* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        if (msm_obj->flags & MSM_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        } else if (msm_obj->flags & MSM_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(obj->filp);
                vma->vm_pgoff = 0;
                vma->vm_file = obj->filp;

                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }

        return 0;
}

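/*
 * Note: VM_MIXEDMAP (rather than VM_PFNMAP) is used because no pages are
 * inserted at mmap time; they are faulted in on demand by vm_insert_mixed()
 * in msm_gem_fault() below.
 */
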
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct drm_device *dev = obj->dev;
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
        int ret;

        /* Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet
         */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        /* make sure we have pages attached now */
        pages = get_pages(obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out_unlock;
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;

        pfn = page_to_pfn(msm_obj->pages[pgoff]);

        VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
                        pfn, pfn << PAGE_SHIFT);

        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
        mutex_unlock(&dev->struct_mutex);
out:
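        /*
         * Fold the error into a VM fault code.  -EAGAIN, -ERESTARTSYS and
         * -EINTR fall through to VM_FAULT_NOPAGE so the fault is simply
         * retried (e.g. once a pending signal has been handled).
         */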
        switch (ret) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);

        if (ret) {
                dev_err(dev->dev, "could not allocate mmap offset\n");
                return 0;
        }

        return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
        uint64_t offset;

        mutex_lock(&obj->dev->struct_mutex);
        offset = mmap_offset(obj);
        mutex_unlock(&obj->dev->struct_mutex);

        return offset;
}

/* helpers for dealing w/ iommu: */
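/*
 * map_range() programs an IOMMU domain from the object's scatterlist,
 * unwinding any partially completed mapping on failure; unmap_range() is
 * its inverse.
 */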
static int map_range(struct iommu_domain *domain, unsigned int iova,
                struct sg_table *sgt, unsigned int len, int prot)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_phys(sg) - sg->offset;
                size_t bytes = sg->length + sg->offset;

                VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);

                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg->length + sg->offset;
                iommu_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}

static void unmap_range(struct iommu_domain *domain, unsigned int iova,
                struct sg_table *sgt, unsigned int len)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg->length + sg->offset;
                size_t unmapped;

                unmapped = iommu_unmap(domain, da, bytes);
                if (unmapped < bytes)
                        break;

                VERB("unmap[%d]: %08x(%x)", i, iova, bytes);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                da += bytes;
        }
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                uint32_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        if (!msm_obj->domain[id].iova) {
                struct msm_drm_private *priv = obj->dev->dev_private;
                uint32_t offset = (uint32_t)mmap_offset(obj);
                struct page **pages;

                pages = get_pages(obj);
                if (IS_ERR(pages))
                        return PTR_ERR(pages);

                // XXX ideally we would not map buffers writable when not needed...
                ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
                                obj->size, IOMMU_READ | IOMMU_WRITE);
                msm_obj->domain[id].iova = offset;
        }

        if (!ret)
                *iova = msm_obj->domain[id].iova;

        return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret;

        /* this is safe right now because we don't unmap until the
         * bo is deleted:
         */
        if (msm_obj->domain[id].iova) {
                *iova = msm_obj->domain[id].iova;
                return 0;
        }

        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_get_iova_locked(obj, id, iova);
        mutex_unlock(&obj->dev->struct_mutex);

        return ret;
}

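/*
 * Example (illustrative sketch, not part of this file): a typical caller is
 * the GPU submit path, which pins a bo into the GPU's domain before emitting
 * commands that reference it.  Assuming the GPU's address-space index is
 * available as gpu->id:
 *
 *	uint32_t iova;
 *	ret = msm_gem_get_iova(obj, gpu->id, &iova);
 *	if (ret)
 *		return ret;
 *
 * The mapping then stays in place until the bo is freed (see
 * msm_gem_put_iova() below and msm_gem_free_object()).
 */
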
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
        // XXX TODO ..
        // NOTE: probably don't need a _locked() version.. we wouldn't
        // normally unmap here, but instead just mark that it could be
        // unmapped (if the iova refcnt drops to zero), but then later
        // if another _get_iova_locked() fails we can start unmapping
        // things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args)
{
        args->pitch = align_pitch(args->width, args->bpp);
        args->size = PAGE_ALIGN(args->pitch * args->height);
        return msm_gem_new_handle(dev, file, args->size,
                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        /* GEM does all our handle to object mapping */
        obj = drm_gem_object_lookup(dev, file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto fail;
        }

        *offset = msm_gem_mmap_offset(obj);

        drm_gem_object_unreference_unlocked(obj);

fail:
        return ret;
}

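/*
 * Example (illustrative, userspace side): a dumb buffer created above is
 * mapped through the fake offset returned by DRM_IOCTL_MODE_MAP_DUMB:
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, req.offset);
 *
 * The resulting VMA is then set up by msm_gem_mmap()/msm_gem_mmap_obj().
 */
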
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        if (!msm_obj->vaddr) {
                struct page **pages = get_pages(obj);
                if (IS_ERR(pages))
                        return ERR_CAST(pages);
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        }
        return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
        void *ret;
        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_vaddr_locked(obj);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
}

/* setup callback for when bo is no longer busy..
 * TODO probably want to differentiate read vs write..
 */
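/*
 * The callback must not already be queued (its work entry must be empty).
 * It runs from the driver workqueue: immediately if the bo is idle,
 * otherwise once the bo's most recent fence has been retired.
 */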
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
                struct msm_fence_cb *cb)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        mutex_lock(&dev->struct_mutex);
        if (!list_empty(&cb->work.entry)) {
                ret = -EINVAL;
        } else if (is_active(msm_obj)) {
                cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
                list_add_tail(&cb->work.entry, &priv->fence_cbs);
        } else {
                queue_work(priv->wq, &cb->work);
        }
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
                struct msm_gpu *gpu, bool write, uint32_t fence)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        msm_obj->gpu = gpu;
        if (write)
                msm_obj->write_fence = fence;
        else
                msm_obj->read_fence = fence;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        msm_obj->gpu = NULL;
        msm_obj->read_fence = 0;
        msm_obj->write_fence = 0;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

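/*
 * Every bo lives on exactly one list: either a GPU's active_list (while it
 * has outstanding rendering) or priv->inactive_list.  Both moves happen
 * under struct_mutex; move_to_inactive() is typically called from the GPU
 * retire path once the bo's fences have signaled.
 */
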
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
                struct timespec *timeout)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        if (is_active(msm_obj)) {
                uint32_t fence = 0;

                if (op & MSM_PREP_READ)
                        fence = msm_obj->write_fence;
                if (op & MSM_PREP_WRITE)
                        fence = max(fence, msm_obj->read_fence);
                if (op & MSM_PREP_NOSYNC)
                        timeout = NULL;

                ret = msm_wait_fence_interruptable(dev, fence, timeout);
        }

        /* TODO cache maintenance */

        return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
        /* TODO cache maintenance */
        return 0;
}

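/*
 * Example (illustrative): the GEM_CPU_PREP/GEM_CPU_FINI ioctls bracket CPU
 * access with these helpers.  A CPU read only has to wait for the last GPU
 * write, while a CPU write also waits for outstanding GPU reads:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (!ret) {
 *		... CPU access to the buffer ...
 *		msm_gem_cpu_fini(obj);
 *	}
 */
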
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        uint64_t off = drm_vma_node_start(&obj->vma_node);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
        seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        msm_obj->read_fence, msm_obj->write_fence,
                        obj->name, obj->refcount.refcount.counter,
                        off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
        struct msm_gem_object *msm_obj;
        int count = 0;
        size_t size = 0;

        list_for_each_entry(msm_obj, list, mm_list) {
                struct drm_gem_object *obj = &msm_obj->base;
                seq_printf(m, " ");
                msm_gem_describe(obj, m);
                count++;
                size += obj->size;
        }

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int id;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* object should not be on active list: */
        WARN_ON(is_active(msm_obj));

        list_del(&msm_obj->mm_list);

        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
                if (msm_obj->domain[id].iova) {
                        struct msm_drm_private *priv = obj->dev->dev_private;
                        uint32_t offset = (uint32_t)mmap_offset(obj);
                        unmap_range(priv->iommus[id], offset,
                                        msm_obj->sgt, obj->size);
                }
        }

        drm_gem_free_mmap_offset(obj);

        if (obj->import_attach) {
                if (msm_obj->vaddr)
                        dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

                /* Don't drop the pages for imported dmabuf, as they are not
                 * ours, just free the array we allocated:
                 */
                if (msm_obj->pages)
                        drm_free_large(msm_obj->pages);
        } else {
                if (msm_obj->vaddr)
                        vunmap(msm_obj->vaddr);
                put_pages(obj);
        }

        if (msm_obj->resv == &msm_obj->_resv)
                reservation_object_fini(msm_obj->resv);

        drm_gem_object_release(obj);

        kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                uint32_t size, uint32_t flags, uint32_t *handle)
{
        struct drm_gem_object *obj;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        obj = msm_gem_new(dev, size, flags);

        mutex_unlock(&dev->struct_mutex);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
                uint32_t size, uint32_t flags,
                struct drm_gem_object **obj)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;

        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
        case MSM_BO_WC:
                break;
        default:
                dev_err(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                return -EINVAL;
        }

        msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
        if (!msm_obj)
                return -ENOMEM;

        msm_obj->flags = flags;

        msm_obj->resv = &msm_obj->_resv;
        reservation_object_init(msm_obj->resv);

        INIT_LIST_HEAD(&msm_obj->submit_entry);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

        *obj = &msm_obj->base;

        return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags)
{
        /* initialize to NULL so the fail: path is safe if
         * msm_gem_new_impl() errors out before setting it:
         */
        struct drm_gem_object *obj = NULL;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        size = PAGE_ALIGN(size);

        ret = msm_gem_new_impl(dev, size, flags, &obj);
        if (ret)
                goto fail;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto fail;

        return obj;

fail:
        if (obj)
                drm_gem_object_unreference_unlocked(obj);

        return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                uint32_t size, struct sg_table *sgt)
{
        struct msm_gem_object *msm_obj;
        struct drm_gem_object *obj = NULL;   /* NULL so fail: path is safe */
        int ret, npages;

        size = PAGE_ALIGN(size);

        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
        if (ret)
                goto fail;

        drm_gem_private_object_init(dev, obj, size);

        npages = size / PAGE_SIZE;

        msm_obj = to_msm_bo(obj);
        msm_obj->sgt = sgt;
        msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (!msm_obj->pages) {
                ret = -ENOMEM;
                goto fail;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
        if (ret)
                goto fail;

        return obj;

fail:
        if (obj)
                drm_gem_object_unreference_unlocked(obj);

        return ERR_PTR(ret);
}
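
/*
 * Note: for imported dma-bufs the caller's sg_table is kept and only the
 * struct page array is allocated here; msm_gem_free_object() therefore
 * frees just that array (and uses dma_buf_vunmap() for any kernel mapping)
 * instead of calling put_pages().
 */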