i915_gem_execbuffer.c

  1. /*
  2. * Copyright © 2008,2010 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eric Anholt <eric@anholt.net>
  25. * Chris Wilson <chris@chris-wilson.co.uk>
  26. *
  27. */
  28. #include "drmP.h"
  29. #include "drm.h"
  30. #include "i915_drm.h"
  31. #include "i915_drv.h"
  32. #include "i915_trace.h"
  33. #include "intel_drv.h"
  34. #include <linux/dma_remapping.h>
  35. struct change_domains {
  36. uint32_t invalidate_domains;
  37. uint32_t flush_domains;
  38. uint32_t flush_rings;
  39. uint32_t flips;
  40. };
  41. /*
  42. * Set the next domain for the specified object. This
  43. * may not actually perform the necessary flushing/invalidating though,
  44. * as that may want to be batched with other set_domain operations
  45. *
  46. * This is (we hope) the only really tricky part of gem. The goal
  47. * is fairly simple -- track which caches hold bits of the object
  48. * and make sure they remain coherent. A few concrete examples may
  49. * help to explain how it works. For shorthand, we use the notation
  50. * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
  51. * a pair of read and write domain masks.
  52. *
  53. * Case 1: the batch buffer
  54. *
  55. * 1. Allocated
  56. * 2. Written by CPU
  57. * 3. Mapped to GTT
  58. * 4. Read by GPU
  59. * 5. Unmapped from GTT
  60. * 6. Freed
  61. *
  62. * Let's take these a step at a time
  63. *
  64. * 1. Allocated
  65. * Pages allocated from the kernel may still have
  66. * cache contents, so we set them to (CPU, CPU) always.
  67. * 2. Written by CPU (using pwrite)
  68. * The pwrite function calls set_domain (CPU, CPU) and
  69. * this function does nothing (as nothing changes)
  70. * 3. Mapped by GTT
  71. * This function asserts that the object is not
  72. * currently in any GPU-based read or write domains
  73. * 4. Read by GPU
  74. * i915_gem_execbuffer calls set_domain (COMMAND, 0).
  75. * As write_domain is zero, this function adds in the
  76. * current read domains (CPU+COMMAND, 0).
  77. * flush_domains is set to CPU.
  78. * invalidate_domains is set to COMMAND
  79. * clflush is run to get data out of the CPU caches
  80. * then i915_dev_set_domain calls i915_gem_flush to
  81. * emit an MI_FLUSH and drm_agp_chipset_flush
  82. * 5. Unmapped from GTT
  83. * i915_gem_object_unbind calls set_domain (CPU, CPU)
  84. * flush_domains and invalidate_domains end up both zero
  85. * so no flushing/invalidating happens
  86. * 6. Freed
  87. * yay, done
  88. *
  89. * Case 2: The shared render buffer
  90. *
  91. * 1. Allocated
  92. * 2. Mapped to GTT
  93. * 3. Read/written by GPU
  94. * 4. set_domain to (CPU,CPU)
  95. * 5. Read/written by CPU
  96. * 6. Read/written by GPU
  97. *
  98. * 1. Allocated
  99. * Same as last example, (CPU, CPU)
  100. * 2. Mapped to GTT
  101. * Nothing changes (assertions find that it is not in the GPU)
  102. * 3. Read/written by GPU
  103. * execbuffer calls set_domain (RENDER, RENDER)
  104. * flush_domains gets CPU
  105. * invalidate_domains gets GPU
  106. * clflush (obj)
  107. * MI_FLUSH and drm_agp_chipset_flush
  108. * 4. set_domain (CPU, CPU)
  109. * flush_domains gets GPU
  110. * invalidate_domains gets CPU
  111. * wait_rendering (obj) to make sure all drawing is complete.
  112. * This will include an MI_FLUSH to get the data from GPU
  113. * to memory
  114. * clflush (obj) to invalidate the CPU cache
  115. * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
  116. * 5. Read/written by CPU
  117. * cache lines are loaded and dirtied
  118. * 6. Read/written by GPU
  119. * Same as last GPU access
  120. *
  121. * Case 3: The constant buffer
  122. *
  123. * 1. Allocated
  124. * 2. Written by CPU
  125. * 3. Read by GPU
  126. * 4. Updated (written) by CPU again
  127. * 5. Read by GPU
  128. *
  129. * 1. Allocated
  130. * (CPU, CPU)
  131. * 2. Written by CPU
  132. * (CPU, CPU)
  133. * 3. Read by GPU
  134. * (CPU+RENDER, 0)
  135. * flush_domains = CPU
  136. * invalidate_domains = RENDER
  137. * clflush (obj)
  138. * MI_FLUSH
  139. * drm_agp_chipset_flush
  140. * 4. Updated (written) by CPU again
  141. * (CPU, CPU)
  142. * flush_domains = 0 (no previous write domain)
  143. * invalidate_domains = 0 (no new read domains)
  144. * 5. Read by GPU
  145. * (CPU+RENDER, 0)
  146. * flush_domains = CPU
  147. * invalidate_domains = RENDER
  148. * clflush (obj)
  149. * MI_FLUSH
  150. * drm_agp_chipset_flush
  151. */
  152. static void
  153. i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
  154. struct intel_ring_buffer *ring,
  155. struct change_domains *cd)
  156. {
  157. uint32_t invalidate_domains = 0, flush_domains = 0;
  158. /*
  159. * If the object isn't moving to a new write domain,
  160. * let the object stay in multiple read domains
  161. */
  162. if (obj->base.pending_write_domain == 0)
  163. obj->base.pending_read_domains |= obj->base.read_domains;
  164. /*
  165. * Flush the current write domain if
  166. * the new read domains don't match. Invalidate
  167. * any read domains which differ from the old
  168. * write domain
  169. */
  170. if (obj->base.write_domain &&
  171. (((obj->base.write_domain != obj->base.pending_read_domains ||
  172. obj->ring != ring)) ||
  173. (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
  174. flush_domains |= obj->base.write_domain;
  175. invalidate_domains |=
  176. obj->base.pending_read_domains & ~obj->base.write_domain;
  177. }
  178. /*
  179. * Invalidate any read caches which may have
  180. * stale data. That is, any new read domains.
  181. */
  182. invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
  183. if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
  184. i915_gem_clflush_object(obj);
  185. if (obj->base.pending_write_domain)
  186. cd->flips |= atomic_read(&obj->pending_flip);
  187. /* The actual obj->write_domain will be updated with
  188. * pending_write_domain after we emit the accumulated flush for all
  189. * of our domain changes in execbuffers (which clears objects'
  190. * write_domains). So if we have a current write domain that we
  191. * aren't changing, set pending_write_domain to that.
  192. */
  193. if (flush_domains == 0 && obj->base.pending_write_domain == 0)
  194. obj->base.pending_write_domain = obj->base.write_domain;
  195. cd->invalidate_domains |= invalidate_domains;
  196. cd->flush_domains |= flush_domains;
  197. if (flush_domains & I915_GEM_GPU_DOMAINS)
  198. cd->flush_rings |= intel_ring_flag(obj->ring);
  199. if (invalidate_domains & I915_GEM_GPU_DOMAINS)
  200. cd->flush_rings |= intel_ring_flag(ring);
  201. }
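/*
 * Editor's note: a simplified standalone sketch (not kernel code) of the
 * mask arithmetic that i915_gem_object_set_to_gpu_domain() above performs
 * for Case 3, step 3 of the comment: a constant buffer sitting in
 * (CPU, CPU) is moved to pending (CPU+RENDER, 0). The ring and fence
 * checks of the real function are omitted; the domain values match those
 * in i915_drm.h.
 */
#include <stdint.h>
#include <stdio.h>

#define I915_GEM_DOMAIN_CPU    0x00000001
#define I915_GEM_DOMAIN_RENDER 0x00000002

int main(void)
{
	uint32_t read_domains  = I915_GEM_DOMAIN_CPU;	/* current read domains */
	uint32_t write_domain  = I915_GEM_DOMAIN_CPU;	/* current write domain */
	uint32_t pending_read  = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_RENDER;
	uint32_t pending_write = 0;
	uint32_t flush = 0, invalidate = 0;

	/* no new write domain: the object keeps its existing read domains */
	if (pending_write == 0)
		pending_read |= read_domains;

	/* flush the old write domain if the read set is changing */
	if (write_domain && write_domain != pending_read) {
		flush |= write_domain;
		invalidate |= pending_read & ~write_domain;
	}

	/* invalidate any read caches that would otherwise hold stale data */
	invalidate |= pending_read & ~read_domains;

	/* prints flush_domains=0x1 invalidate_domains=0x2: clflush + MI_FLUSH */
	printf("flush_domains=%#x invalidate_domains=%#x\n", flush, invalidate);
	return 0;
}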
  202. struct eb_objects {
  203. int and;
  204. struct hlist_head buckets[0];
  205. };
  206. static struct eb_objects *
  207. eb_create(int size)
  208. {
  209. struct eb_objects *eb;
  210. int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
  211. while (count > size)
  212. count >>= 1;
  213. eb = kzalloc(count*sizeof(struct hlist_head) +
  214. sizeof(struct eb_objects),
  215. GFP_KERNEL);
  216. if (eb == NULL)
  217. return eb;
  218. eb->and = count - 1;
  219. return eb;
  220. }
  221. static void
  222. eb_reset(struct eb_objects *eb)
  223. {
  224. memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
  225. }
  226. static void
  227. eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
  228. {
  229. hlist_add_head(&obj->exec_node,
  230. &eb->buckets[obj->exec_handle & eb->and]);
  231. }
  232. static struct drm_i915_gem_object *
  233. eb_get_object(struct eb_objects *eb, unsigned long handle)
  234. {
  235. struct hlist_head *head;
  236. struct hlist_node *node;
  237. struct drm_i915_gem_object *obj;
  238. head = &eb->buckets[handle & eb->and];
  239. hlist_for_each(node, head) {
  240. obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
  241. if (obj->exec_handle == handle)
  242. return obj;
  243. }
  244. return NULL;
  245. }
  246. static void
  247. eb_destroy(struct eb_objects *eb)
  248. {
  249. kfree(eb);
  250. }
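/*
 * Editor's note: a standalone sketch (not kernel code) of the bucket
 * sizing used by eb_create() above, assuming 4 KiB pages and 8-byte list
 * heads (64-bit): start at PAGE_SIZE / sizeof(struct hlist_head) / 2 =
 * 256 buckets, halve until the count no longer exceeds the number of
 * exec objects, then mask handles with (count - 1) to pick a bucket, as
 * eb_add_object()/eb_get_object() do. The object count and handle below
 * are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, head_size = 8;
	int size = 40;				/* objects in this execbuf */
	int count = page_size / head_size / 2;	/* 256 */

	while (count > size)
		count >>= 1;			/* 32 buckets */

	unsigned long handle = 0x1234;
	printf("buckets=%d bucket=%lu\n", count, handle & (count - 1));
	return 0;				/* buckets=32 bucket=20 */
}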
  251. static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
  252. {
  253. return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
  254. obj->cache_level != I915_CACHE_NONE);
  255. }
  256. static int
  257. i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
  258. struct eb_objects *eb,
  259. struct drm_i915_gem_relocation_entry *reloc)
  260. {
  261. struct drm_device *dev = obj->base.dev;
  262. struct drm_gem_object *target_obj;
  263. struct drm_i915_gem_object *target_i915_obj;
  264. uint32_t target_offset;
  265. int ret = -EINVAL;
  266. /* we already hold a reference to all valid objects */
  267. target_obj = &eb_get_object(eb, reloc->target_handle)->base;
  268. if (unlikely(target_obj == NULL))
  269. return -ENOENT;
  270. target_i915_obj = to_intel_bo(target_obj);
  271. target_offset = target_i915_obj->gtt_offset;
  272. /* The target buffer should have appeared before us in the
  273. * exec_object list, so it should have a GTT space bound by now.
  274. */
  275. if (unlikely(target_offset == 0)) {
  276. DRM_DEBUG("No GTT space found for object %d\n",
  277. reloc->target_handle);
  278. return ret;
  279. }
  280. /* Validate that the target is in a valid r/w GPU domain */
  281. if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
  282. DRM_DEBUG("reloc with multiple write domains: "
  283. "obj %p target %d offset %d "
  284. "read %08x write %08x",
  285. obj, reloc->target_handle,
  286. (int) reloc->offset,
  287. reloc->read_domains,
  288. reloc->write_domain);
  289. return ret;
  290. }
  291. if (unlikely((reloc->write_domain | reloc->read_domains)
  292. & ~I915_GEM_GPU_DOMAINS)) {
  293. DRM_DEBUG("reloc with read/write non-GPU domains: "
  294. "obj %p target %d offset %d "
  295. "read %08x write %08x",
  296. obj, reloc->target_handle,
  297. (int) reloc->offset,
  298. reloc->read_domains,
  299. reloc->write_domain);
  300. return ret;
  301. }
  302. if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
  303. reloc->write_domain != target_obj->pending_write_domain)) {
  304. DRM_DEBUG("Write domain conflict: "
  305. "obj %p target %d offset %d "
  306. "new %08x old %08x\n",
  307. obj, reloc->target_handle,
  308. (int) reloc->offset,
  309. reloc->write_domain,
  310. target_obj->pending_write_domain);
  311. return ret;
  312. }
  313. target_obj->pending_read_domains |= reloc->read_domains;
  314. target_obj->pending_write_domain |= reloc->write_domain;
  315. /* If the relocation already has the right value in it, no
  316. * more work needs to be done.
  317. */
  318. if (target_offset == reloc->presumed_offset)
  319. return 0;
  320. /* Check that the relocation address is valid... */
  321. if (unlikely(reloc->offset > obj->base.size - 4)) {
  322. DRM_DEBUG("Relocation beyond object bounds: "
  323. "obj %p target %d offset %d size %d.\n",
  324. obj, reloc->target_handle,
  325. (int) reloc->offset,
  326. (int) obj->base.size);
  327. return ret;
  328. }
  329. if (unlikely(reloc->offset & 3)) {
  330. DRM_DEBUG("Relocation not 4-byte aligned: "
  331. "obj %p target %d offset %d.\n",
  332. obj, reloc->target_handle,
  333. (int) reloc->offset);
  334. return ret;
  335. }
  336. /* We can't wait for rendering with pagefaults disabled */
  337. if (obj->active && in_atomic())
  338. return -EFAULT;
  339. reloc->delta += target_offset;
  340. if (use_cpu_reloc(obj)) {
  341. uint32_t page_offset = reloc->offset & ~PAGE_MASK;
  342. char *vaddr;
  343. ret = i915_gem_object_set_to_cpu_domain(obj, 1);
  344. if (ret)
  345. return ret;
  346. vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
  347. *(uint32_t *)(vaddr + page_offset) = reloc->delta;
  348. kunmap_atomic(vaddr);
  349. } else {
  350. struct drm_i915_private *dev_priv = dev->dev_private;
  351. uint32_t __iomem *reloc_entry;
  352. void __iomem *reloc_page;
  353. ret = i915_gem_object_set_to_gtt_domain(obj, 1);
  354. if (ret)
  355. return ret;
  356. /* Map the page containing the relocation we're going to perform. */
  357. reloc->offset += obj->gtt_offset;
  358. reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
  359. reloc->offset & PAGE_MASK);
  360. reloc_entry = (uint32_t __iomem *)
  361. (reloc_page + (reloc->offset & ~PAGE_MASK));
  362. iowrite32(reloc->delta, reloc_entry);
  363. io_mapping_unmap_atomic(reloc_page);
  364. }
  365. /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
  366. * pipe_control writes because the gpu doesn't properly redirect them
  367. * through the ppgtt for non-secure batchbuffers. */
  368. if (unlikely(IS_GEN6(dev) &&
  369. reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
  370. !target_i915_obj->has_global_gtt_mapping)) {
  371. i915_gem_gtt_bind_object(target_i915_obj,
  372. target_i915_obj->cache_level);
  373. }
  374. /* and update the user's relocation entry */
  375. reloc->presumed_offset = target_offset;
  376. return 0;
  377. }
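/*
 * Editor's note: a minimal userspace sketch of the relocation entry that
 * i915_gem_execbuffer_relocate_entry() above patches, assuming libdrm's
 * i915_drm.h is on the include path; the helper name, handle and offsets
 * are hypothetical. The kernel writes target_offset + delta into the
 * batch at 'offset'; if presumed_offset already equals the target's GTT
 * offset, the write is skipped entirely.
 */
#include <stdint.h>
#include <string.h>
#include <i915_drm.h>

static void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
		       uint32_t target_handle)
{
	memset(reloc, 0, sizeof(*reloc));
	reloc->target_handle = target_handle;	/* exec handle of the target bo */
	reloc->delta = 0;			/* byte offset inside the target */
	reloc->offset = 4 * 7;			/* dword in the batch to patch */
	reloc->presumed_offset = 0;		/* no guess: force the kernel to patch */
	reloc->read_domains = I915_GEM_DOMAIN_RENDER;
	reloc->write_domain = I915_GEM_DOMAIN_RENDER;
}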
  378. static int
  379. i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
  380. struct eb_objects *eb)
  381. {
  382. #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
  383. struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
  384. struct drm_i915_gem_relocation_entry __user *user_relocs;
  385. struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
  386. int remain, ret;
  387. user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
  388. remain = entry->relocation_count;
  389. while (remain) {
  390. struct drm_i915_gem_relocation_entry *r = stack_reloc;
  391. int count = remain;
  392. if (count > ARRAY_SIZE(stack_reloc))
  393. count = ARRAY_SIZE(stack_reloc);
  394. remain -= count;
  395. if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
  396. return -EFAULT;
  397. do {
  398. u64 offset = r->presumed_offset;
  399. ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
  400. if (ret)
  401. return ret;
  402. if (r->presumed_offset != offset &&
  403. __copy_to_user_inatomic(&user_relocs->presumed_offset,
  404. &r->presumed_offset,
  405. sizeof(r->presumed_offset))) {
  406. return -EFAULT;
  407. }
  408. user_relocs++;
  409. r++;
  410. } while (--count);
  411. }
  412. return 0;
  413. #undef N_RELOC
  414. }
  415. static int
  416. i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
  417. struct eb_objects *eb,
  418. struct drm_i915_gem_relocation_entry *relocs)
  419. {
  420. const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
  421. int i, ret;
  422. for (i = 0; i < entry->relocation_count; i++) {
  423. ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
  424. if (ret)
  425. return ret;
  426. }
  427. return 0;
  428. }
  429. static int
  430. i915_gem_execbuffer_relocate(struct drm_device *dev,
  431. struct eb_objects *eb,
  432. struct list_head *objects)
  433. {
  434. struct drm_i915_gem_object *obj;
  435. int ret = 0;
  436. /* This is the fast path and we cannot handle a pagefault whilst
  437. * holding the struct mutex lest the user pass in the relocations
  438. * contained within a mmapped bo. In such a case, the page
  439. * fault handler would call i915_gem_fault() and we would try to
  440. * acquire the struct mutex again. Obviously this is bad and so
  441. * lockdep complains vehemently.
  442. */
  443. pagefault_disable();
  444. list_for_each_entry(obj, objects, exec_list) {
  445. ret = i915_gem_execbuffer_relocate_object(obj, eb);
  446. if (ret)
  447. break;
  448. }
  449. pagefault_enable();
  450. return ret;
  451. }
  452. #define __EXEC_OBJECT_HAS_FENCE (1<<31)
  453. static int
  454. need_reloc_mappable(struct drm_i915_gem_object *obj)
  455. {
  456. struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
  457. return entry->relocation_count && !use_cpu_reloc(obj);
  458. }
  459. static int
  460. pin_and_fence_object(struct drm_i915_gem_object *obj,
  461. struct intel_ring_buffer *ring)
  462. {
  463. struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
  464. bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
  465. bool need_fence, need_mappable;
  466. int ret;
  467. need_fence =
  468. has_fenced_gpu_access &&
  469. entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
  470. obj->tiling_mode != I915_TILING_NONE;
  471. need_mappable = need_fence || need_reloc_mappable(obj);
  472. ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
  473. if (ret)
  474. return ret;
  475. if (has_fenced_gpu_access) {
  476. if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
  477. if (obj->tiling_mode) {
  478. ret = i915_gem_object_get_fence(obj, ring);
  479. if (ret)
  480. goto err_unpin;
  481. entry->flags |= __EXEC_OBJECT_HAS_FENCE;
  482. i915_gem_object_pin_fence(obj);
  483. } else {
  484. ret = i915_gem_object_put_fence(obj);
  485. if (ret)
  486. goto err_unpin;
  487. }
  488. obj->pending_fenced_gpu_access = true;
  489. }
  490. }
  491. entry->offset = obj->gtt_offset;
  492. return 0;
  493. err_unpin:
  494. i915_gem_object_unpin(obj);
  495. return ret;
  496. }
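/*
 * Editor's note: a minimal userspace sketch of the exec entry fields that
 * pin_and_fence_object() above consumes, assuming libdrm's i915_drm.h;
 * the helper name, handle, alignment and relocation array are
 * hypothetical. 'alignment' constrains the GTT placement,
 * EXEC_OBJECT_NEEDS_FENCE asks for a fence register (used for tiled
 * buffers on pre-gen4 hardware), and 'offset' is an output that the
 * kernel fills with the final GTT offset.
 */
#include <stdint.h>
#include <string.h>
#include <i915_drm.h>

static void fill_exec_entry(struct drm_i915_gem_exec_object2 *entry,
			    uint32_t handle,
			    struct drm_i915_gem_relocation_entry *relocs,
			    uint32_t nreloc)
{
	memset(entry, 0, sizeof(*entry));
	entry->handle = handle;
	entry->relocation_count = nreloc;
	entry->relocs_ptr = (uintptr_t)relocs;
	entry->alignment = 4096;		/* power of two; 0 means default */
	entry->flags = EXEC_OBJECT_NEEDS_FENCE;	/* e.g. a tiled bo on gen2/3 */
	/* entry->offset is written back by the kernel after reservation */
}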
  497. static int
  498. i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
  499. struct drm_file *file,
  500. struct list_head *objects)
  501. {
  502. drm_i915_private_t *dev_priv = ring->dev->dev_private;
  503. struct drm_i915_gem_object *obj;
  504. int ret, retry;
  505. bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
  506. struct list_head ordered_objects;
  507. INIT_LIST_HEAD(&ordered_objects);
  508. while (!list_empty(objects)) {
  509. struct drm_i915_gem_exec_object2 *entry;
  510. bool need_fence, need_mappable;
  511. obj = list_first_entry(objects,
  512. struct drm_i915_gem_object,
  513. exec_list);
  514. entry = obj->exec_entry;
  515. need_fence =
  516. has_fenced_gpu_access &&
  517. entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
  518. obj->tiling_mode != I915_TILING_NONE;
  519. need_mappable = need_fence || need_reloc_mappable(obj);
  520. if (need_mappable)
  521. list_move(&obj->exec_list, &ordered_objects);
  522. else
  523. list_move_tail(&obj->exec_list, &ordered_objects);
  524. obj->base.pending_read_domains = 0;
  525. obj->base.pending_write_domain = 0;
  526. }
  527. list_splice(&ordered_objects, objects);
  528. /* Attempt to pin all of the buffers into the GTT.
  529. * This is done in 3 phases:
  530. *
  531. * 1a. Unbind all objects that do not match the GTT constraints for
  532. * the execbuffer (fenceable, mappable, alignment etc).
  533. * 1b. Increment pin count for already bound objects.
  534. * 2. Bind new objects.
  535. * 3. Decrement pin count.
  536. *
  537. * This avoids unnecessary unbinding of later objects in order to make
  538. * room for the earlier objects *unless* we need to defragment.
  539. */
  540. retry = 0;
  541. do {
  542. ret = 0;
  543. /* Unbind any ill-fitting objects or pin. */
  544. list_for_each_entry(obj, objects, exec_list) {
  545. struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
  546. bool need_fence, need_mappable;
  547. if (!obj->gtt_space)
  548. continue;
  549. need_fence =
  550. has_fenced_gpu_access &&
  551. entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
  552. obj->tiling_mode != I915_TILING_NONE;
  553. need_mappable = need_fence || need_reloc_mappable(obj);
  554. if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
  555. (need_mappable && !obj->map_and_fenceable))
  556. ret = i915_gem_object_unbind(obj);
  557. else
  558. ret = pin_and_fence_object(obj, ring);
  559. if (ret)
  560. goto err;
  561. }
  562. /* Bind fresh objects */
  563. list_for_each_entry(obj, objects, exec_list) {
  564. if (obj->gtt_space)
  565. continue;
  566. ret = pin_and_fence_object(obj, ring);
  567. if (ret) {
  568. int ret_ignore;
  569. /* This can potentially raise a harmless
  570. * -EINVAL if we failed to bind in the above
  571. * call. It cannot raise -EINTR since we know
  572. * that the bo is freshly bound and so will
  573. * not need to be flushed or waited upon.
  574. */
  575. ret_ignore = i915_gem_object_unbind(obj);
  576. (void)ret_ignore;
  577. WARN_ON(obj->gtt_space);
  578. break;
  579. }
  580. }
  581. /* Decrement pin count for bound objects */
  582. list_for_each_entry(obj, objects, exec_list) {
  583. struct drm_i915_gem_exec_object2 *entry;
  584. if (!obj->gtt_space)
  585. continue;
  586. entry = obj->exec_entry;
  587. if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
  588. i915_gem_object_unpin_fence(obj);
  589. entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
  590. }
  591. i915_gem_object_unpin(obj);
  592. /* ... and ensure ppgtt mapping exists if needed. */
  593. if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
  594. i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
  595. obj, obj->cache_level);
  596. obj->has_aliasing_ppgtt_mapping = 1;
  597. }
  598. }
  599. if (ret != -ENOSPC || retry > 1)
  600. return ret;
  601. /* First attempt, just clear anything that is purgeable.
  602. * Second attempt, clear the entire GTT.
  603. */
  604. ret = i915_gem_evict_everything(ring->dev, retry == 0);
  605. if (ret)
  606. return ret;
  607. retry++;
  608. } while (1);
  609. err:
  610. list_for_each_entry_continue_reverse(obj, objects, exec_list) {
  611. struct drm_i915_gem_exec_object2 *entry;
  612. if (!obj->gtt_space)
  613. continue;
  614. entry = obj->exec_entry;
  615. if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
  616. i915_gem_object_unpin_fence(obj);
  617. entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
  618. }
  619. i915_gem_object_unpin(obj);
  620. }
  621. return ret;
  622. }
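/*
 * Editor's note: a standalone sketch (not kernel code) of the retry
 * policy implemented by i915_gem_execbuffer_reserve() above. try_reserve()
 * is a stand-in that pretends the GTT is full twice: the first -ENOSPC
 * triggers eviction of purgeable objects only, the second evicts
 * everything, and a third failure would be returned to the caller.
 */
#include <errno.h>
#include <stdio.h>

static int try_reserve(int attempt)
{
	return attempt < 2 ? -ENOSPC : 0;
}

int main(void)
{
	int retry = 0, ret;

	do {
		ret = try_reserve(retry);
		if (ret != -ENOSPC || retry > 1)
			break;
		printf("evicting %s\n",
		       retry == 0 ? "purgeable objects" : "everything");
		retry++;
	} while (1);

	printf("ret=%d\n", ret);	/* 0 after two rounds of eviction */
	return 0;
}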
  623. static int
  624. i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
  625. struct drm_file *file,
  626. struct intel_ring_buffer *ring,
  627. struct list_head *objects,
  628. struct eb_objects *eb,
  629. struct drm_i915_gem_exec_object2 *exec,
  630. int count)
  631. {
  632. struct drm_i915_gem_relocation_entry *reloc;
  633. struct drm_i915_gem_object *obj;
  634. int *reloc_offset;
  635. int i, total, ret;
  636. /* We may process another execbuffer during the unlock... */
  637. while (!list_empty(objects)) {
  638. obj = list_first_entry(objects,
  639. struct drm_i915_gem_object,
  640. exec_list);
  641. list_del_init(&obj->exec_list);
  642. drm_gem_object_unreference(&obj->base);
  643. }
  644. mutex_unlock(&dev->struct_mutex);
  645. total = 0;
  646. for (i = 0; i < count; i++)
  647. total += exec[i].relocation_count;
  648. reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
  649. reloc = drm_malloc_ab(total, sizeof(*reloc));
  650. if (reloc == NULL || reloc_offset == NULL) {
  651. drm_free_large(reloc);
  652. drm_free_large(reloc_offset);
  653. mutex_lock(&dev->struct_mutex);
  654. return -ENOMEM;
  655. }
  656. total = 0;
  657. for (i = 0; i < count; i++) {
  658. struct drm_i915_gem_relocation_entry __user *user_relocs;
  659. user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
  660. if (copy_from_user(reloc+total, user_relocs,
  661. exec[i].relocation_count * sizeof(*reloc))) {
  662. ret = -EFAULT;
  663. mutex_lock(&dev->struct_mutex);
  664. goto err;
  665. }
  666. reloc_offset[i] = total;
  667. total += exec[i].relocation_count;
  668. }
  669. ret = i915_mutex_lock_interruptible(dev);
  670. if (ret) {
  671. mutex_lock(&dev->struct_mutex);
  672. goto err;
  673. }
  674. /* reacquire the objects */
  675. eb_reset(eb);
  676. for (i = 0; i < count; i++) {
  677. obj = to_intel_bo(drm_gem_object_lookup(dev, file,
  678. exec[i].handle));
  679. if (&obj->base == NULL) {
  680. DRM_DEBUG("Invalid object handle %d at index %d\n",
  681. exec[i].handle, i);
  682. ret = -ENOENT;
  683. goto err;
  684. }
  685. list_add_tail(&obj->exec_list, objects);
  686. obj->exec_handle = exec[i].handle;
  687. obj->exec_entry = &exec[i];
  688. eb_add_object(eb, obj);
  689. }
  690. ret = i915_gem_execbuffer_reserve(ring, file, objects);
  691. if (ret)
  692. goto err;
  693. list_for_each_entry(obj, objects, exec_list) {
  694. int offset = obj->exec_entry - exec;
  695. ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
  696. reloc + reloc_offset[offset]);
  697. if (ret)
  698. goto err;
  699. }
  700. /* Leave the user relocations as they are; this is the painfully slow path,
  701. * and we want to avoid the complication of dropping the lock whilst
  702. * having buffers reserved in the aperture and so causing spurious
  703. * ENOSPC for random operations.
  704. */
  705. err:
  706. drm_free_large(reloc);
  707. drm_free_large(reloc_offset);
  708. return ret;
  709. }
  710. static int
  711. i915_gem_execbuffer_flush(struct drm_device *dev,
  712. uint32_t invalidate_domains,
  713. uint32_t flush_domains,
  714. uint32_t flush_rings)
  715. {
  716. drm_i915_private_t *dev_priv = dev->dev_private;
  717. int i, ret;
  718. if (flush_domains & I915_GEM_DOMAIN_CPU)
  719. intel_gtt_chipset_flush();
  720. if (flush_domains & I915_GEM_DOMAIN_GTT)
  721. wmb();
  722. if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
  723. for (i = 0; i < I915_NUM_RINGS; i++)
  724. if (flush_rings & (1 << i)) {
  725. ret = i915_gem_flush_ring(&dev_priv->ring[i],
  726. invalidate_domains,
  727. flush_domains);
  728. if (ret)
  729. return ret;
  730. }
  731. }
  732. return 0;
  733. }
  734. static bool
  735. intel_enable_semaphores(struct drm_device *dev)
  736. {
  737. if (INTEL_INFO(dev)->gen < 6)
  738. return 0;
  739. if (i915_semaphores >= 0)
  740. return i915_semaphores;
  741. /* Disable semaphores on SNB */
  742. if (INTEL_INFO(dev)->gen == 6)
  743. return 0;
  744. return 1;
  745. }
  746. static int
  747. i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
  748. struct intel_ring_buffer *to)
  749. {
  750. struct intel_ring_buffer *from = obj->ring;
  751. u32 seqno;
  752. int ret, idx;
  753. if (from == NULL || to == from)
  754. return 0;
  755. /* XXX gpu semaphores are implicated in various hard hangs on SNB */
  756. if (!intel_enable_semaphores(obj->base.dev))
  757. return i915_gem_object_wait_rendering(obj);
  758. idx = intel_ring_sync_index(from, to);
  759. seqno = obj->last_rendering_seqno;
  760. if (seqno <= from->sync_seqno[idx])
  761. return 0;
  762. if (seqno == from->outstanding_lazy_request) {
  763. struct drm_i915_gem_request *request;
  764. request = kzalloc(sizeof(*request), GFP_KERNEL);
  765. if (request == NULL)
  766. return -ENOMEM;
  767. ret = i915_add_request(from, NULL, request);
  768. if (ret) {
  769. kfree(request);
  770. return ret;
  771. }
  772. seqno = request->seqno;
  773. }
  774. from->sync_seqno[idx] = seqno;
  775. return to->sync_to(to, from, seqno - 1);
  776. }
  777. static int
  778. i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
  779. {
  780. u32 plane, flip_mask;
  781. int ret;
  782. /* Check for any pending flips. As we only maintain a flip queue depth
  783. * of 1, we can simply insert a WAIT for the next display flip prior
  784. * to executing the batch and avoid stalling the CPU.
  785. */
  786. for (plane = 0; flips >> plane; plane++) {
  787. if (((flips >> plane) & 1) == 0)
  788. continue;
  789. if (plane)
  790. flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
  791. else
  792. flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
  793. ret = intel_ring_begin(ring, 2);
  794. if (ret)
  795. return ret;
  796. intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
  797. intel_ring_emit(ring, MI_NOOP);
  798. intel_ring_advance(ring);
  799. }
  800. return 0;
  801. }
  802. static int
  803. i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
  804. struct list_head *objects)
  805. {
  806. struct drm_i915_gem_object *obj;
  807. struct change_domains cd;
  808. int ret;
  809. memset(&cd, 0, sizeof(cd));
  810. list_for_each_entry(obj, objects, exec_list)
  811. i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
  812. if (cd.invalidate_domains | cd.flush_domains) {
  813. ret = i915_gem_execbuffer_flush(ring->dev,
  814. cd.invalidate_domains,
  815. cd.flush_domains,
  816. cd.flush_rings);
  817. if (ret)
  818. return ret;
  819. }
  820. if (cd.flips) {
  821. ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
  822. if (ret)
  823. return ret;
  824. }
  825. list_for_each_entry(obj, objects, exec_list) {
  826. ret = i915_gem_execbuffer_sync_rings(obj, ring);
  827. if (ret)
  828. return ret;
  829. }
  830. return 0;
  831. }
  832. static bool
  833. i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
  834. {
  835. return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
  836. }
  837. static int
  838. validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
  839. int count)
  840. {
  841. int i;
  842. for (i = 0; i < count; i++) {
  843. char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
  844. int length; /* limited by fault_in_pages_readable() */
  845. /* First check for malicious input causing overflow */
  846. if (exec[i].relocation_count >
  847. INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
  848. return -EINVAL;
  849. length = exec[i].relocation_count *
  850. sizeof(struct drm_i915_gem_relocation_entry);
  851. if (!access_ok(VERIFY_READ, ptr, length))
  852. return -EFAULT;
  853. /* we may also need to update the presumed offsets */
  854. if (!access_ok(VERIFY_WRITE, ptr, length))
  855. return -EFAULT;
  856. if (fault_in_multipages_readable(ptr, length))
  857. return -EFAULT;
  858. }
  859. return 0;
  860. }
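/*
 * Editor's note: a standalone sketch of the overflow guard in
 * validate_exec_list() above. With a 32-byte relocation entry,
 * relocation_count must stay below INT_MAX / 32 so that the byte length
 * still fits in a signed int before it is handed to access_ok() and
 * fault_in_multipages_readable().
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long entry_size = 32;	/* sizeof(struct drm_i915_gem_relocation_entry) */
	unsigned long max_count = INT_MAX / entry_size;

	printf("max relocation_count = %lu\n", max_count);	/* 67108863 */
	return 0;
}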
  861. static void
  862. i915_gem_execbuffer_move_to_active(struct list_head *objects,
  863. struct intel_ring_buffer *ring,
  864. u32 seqno)
  865. {
  866. struct drm_i915_gem_object *obj;
  867. list_for_each_entry(obj, objects, exec_list) {
  868. u32 old_read = obj->base.read_domains;
  869. u32 old_write = obj->base.write_domain;
  870. obj->base.read_domains = obj->base.pending_read_domains;
  871. obj->base.write_domain = obj->base.pending_write_domain;
  872. obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
  873. i915_gem_object_move_to_active(obj, ring, seqno);
  874. if (obj->base.write_domain) {
  875. obj->dirty = 1;
  876. obj->pending_gpu_write = true;
  877. list_move_tail(&obj->gpu_write_list,
  878. &ring->gpu_write_list);
  879. intel_mark_busy(ring->dev, obj);
  880. }
  881. trace_i915_gem_object_change_domain(obj, old_read, old_write);
  882. }
  883. }
  884. static void
  885. i915_gem_execbuffer_retire_commands(struct drm_device *dev,
  886. struct drm_file *file,
  887. struct intel_ring_buffer *ring)
  888. {
  889. struct drm_i915_gem_request *request;
  890. u32 invalidate;
  891. /*
  892. * Ensure that the commands in the batch buffer are
  893. * finished before the interrupt fires.
  894. *
  895. * The sampler always gets flushed on i965 (sigh).
  896. */
  897. invalidate = I915_GEM_DOMAIN_COMMAND;
  898. if (INTEL_INFO(dev)->gen >= 4)
  899. invalidate |= I915_GEM_DOMAIN_SAMPLER;
  900. if (ring->flush(ring, invalidate, 0)) {
  901. i915_gem_next_request_seqno(ring);
  902. return;
  903. }
  904. /* Add a breadcrumb for the completion of the batch buffer */
  905. request = kzalloc(sizeof(*request), GFP_KERNEL);
  906. if (request == NULL || i915_add_request(ring, file, request)) {
  907. i915_gem_next_request_seqno(ring);
  908. kfree(request);
  909. }
  910. }
  911. static int
  912. i915_reset_gen7_sol_offsets(struct drm_device *dev,
  913. struct intel_ring_buffer *ring)
  914. {
  915. drm_i915_private_t *dev_priv = dev->dev_private;
  916. int ret, i;
  917. if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
  918. return 0;
  919. ret = intel_ring_begin(ring, 4 * 3);
  920. if (ret)
  921. return ret;
  922. for (i = 0; i < 4; i++) {
  923. intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
  924. intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
  925. intel_ring_emit(ring, 0);
  926. }
  927. intel_ring_advance(ring);
  928. return 0;
  929. }
  930. static int
  931. i915_gem_do_execbuffer(struct drm_device *dev, void *data,
  932. struct drm_file *file,
  933. struct drm_i915_gem_execbuffer2 *args,
  934. struct drm_i915_gem_exec_object2 *exec)
  935. {
  936. drm_i915_private_t *dev_priv = dev->dev_private;
  937. struct list_head objects;
  938. struct eb_objects *eb;
  939. struct drm_i915_gem_object *batch_obj;
  940. struct drm_clip_rect *cliprects = NULL;
  941. struct intel_ring_buffer *ring;
  942. u32 exec_start, exec_len;
  943. u32 seqno;
  944. u32 mask;
  945. int ret, mode, i;
  946. if (!i915_gem_check_execbuffer(args)) {
  947. DRM_DEBUG("execbuf with invalid offset/length\n");
  948. return -EINVAL;
  949. }
  950. ret = validate_exec_list(exec, args->buffer_count);
  951. if (ret)
  952. return ret;
  953. switch (args->flags & I915_EXEC_RING_MASK) {
  954. case I915_EXEC_DEFAULT:
  955. case I915_EXEC_RENDER:
  956. ring = &dev_priv->ring[RCS];
  957. break;
  958. case I915_EXEC_BSD:
  959. if (!HAS_BSD(dev)) {
  960. DRM_DEBUG("execbuf with invalid ring (BSD)\n");
  961. return -EINVAL;
  962. }
  963. ring = &dev_priv->ring[VCS];
  964. break;
  965. case I915_EXEC_BLT:
  966. if (!HAS_BLT(dev)) {
  967. DRM_DEBUG("execbuf with invalid ring (BLT)\n");
  968. return -EINVAL;
  969. }
  970. ring = &dev_priv->ring[BCS];
  971. break;
  972. default:
  973. DRM_DEBUG("execbuf with unknown ring: %d\n",
  974. (int)(args->flags & I915_EXEC_RING_MASK));
  975. return -EINVAL;
  976. }
  977. mode = args->flags & I915_EXEC_CONSTANTS_MASK;
  978. mask = I915_EXEC_CONSTANTS_MASK;
  979. switch (mode) {
  980. case I915_EXEC_CONSTANTS_REL_GENERAL:
  981. case I915_EXEC_CONSTANTS_ABSOLUTE:
  982. case I915_EXEC_CONSTANTS_REL_SURFACE:
  983. if (ring == &dev_priv->ring[RCS] &&
  984. mode != dev_priv->relative_constants_mode) {
  985. if (INTEL_INFO(dev)->gen < 4)
  986. return -EINVAL;
  987. if (INTEL_INFO(dev)->gen > 5 &&
  988. mode == I915_EXEC_CONSTANTS_REL_SURFACE)
  989. return -EINVAL;
  990. /* The HW changed the meaning on this bit on gen6 */
  991. if (INTEL_INFO(dev)->gen >= 6)
  992. mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
  993. }
  994. break;
  995. default:
  996. DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
  997. return -EINVAL;
  998. }
  999. if (args->buffer_count < 1) {
  1000. DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
  1001. return -EINVAL;
  1002. }
  1003. if (args->num_cliprects != 0) {
  1004. if (ring != &dev_priv->ring[RCS]) {
  1005. DRM_DEBUG("clip rectangles are only valid with the render ring\n");
  1006. return -EINVAL;
  1007. }
  1008. cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
  1009. GFP_KERNEL);
  1010. if (cliprects == NULL) {
  1011. ret = -ENOMEM;
  1012. goto pre_mutex_err;
  1013. }
  1014. if (copy_from_user(cliprects,
  1015. (struct drm_clip_rect __user *)(uintptr_t)
  1016. args->cliprects_ptr,
  1017. sizeof(*cliprects)*args->num_cliprects)) {
  1018. ret = -EFAULT;
  1019. goto pre_mutex_err;
  1020. }
  1021. }
  1022. ret = i915_mutex_lock_interruptible(dev);
  1023. if (ret)
  1024. goto pre_mutex_err;
  1025. if (dev_priv->mm.suspended) {
  1026. mutex_unlock(&dev->struct_mutex);
  1027. ret = -EBUSY;
  1028. goto pre_mutex_err;
  1029. }
  1030. eb = eb_create(args->buffer_count);
  1031. if (eb == NULL) {
  1032. mutex_unlock(&dev->struct_mutex);
  1033. ret = -ENOMEM;
  1034. goto pre_mutex_err;
  1035. }
  1036. /* Look up object handles */
  1037. INIT_LIST_HEAD(&objects);
  1038. for (i = 0; i < args->buffer_count; i++) {
  1039. struct drm_i915_gem_object *obj;
  1040. obj = to_intel_bo(drm_gem_object_lookup(dev, file,
  1041. exec[i].handle));
  1042. if (&obj->base == NULL) {
  1043. DRM_DEBUG("Invalid object handle %d at index %d\n",
  1044. exec[i].handle, i);
  1045. /* prevent error path from reading uninitialized data */
  1046. ret = -ENOENT;
  1047. goto err;
  1048. }
  1049. if (!list_empty(&obj->exec_list)) {
  1050. DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
  1051. obj, exec[i].handle, i);
  1052. ret = -EINVAL;
  1053. goto err;
  1054. }
  1055. list_add_tail(&obj->exec_list, &objects);
  1056. obj->exec_handle = exec[i].handle;
  1057. obj->exec_entry = &exec[i];
  1058. eb_add_object(eb, obj);
  1059. }
  1060. /* take note of the batch buffer before we might reorder the lists */
  1061. batch_obj = list_entry(objects.prev,
  1062. struct drm_i915_gem_object,
  1063. exec_list);
  1064. /* Move the objects en-masse into the GTT, evicting if necessary. */
  1065. ret = i915_gem_execbuffer_reserve(ring, file, &objects);
  1066. if (ret)
  1067. goto err;
  1068. /* The objects are in their final locations, apply the relocations. */
  1069. ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
  1070. if (ret) {
  1071. if (ret == -EFAULT) {
  1072. ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
  1073. &objects, eb,
  1074. exec,
  1075. args->buffer_count);
  1076. BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  1077. }
  1078. if (ret)
  1079. goto err;
  1080. }
  1081. /* Set the pending read domains for the batch buffer to COMMAND */
  1082. if (batch_obj->base.pending_write_domain) {
  1083. DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
  1084. ret = -EINVAL;
  1085. goto err;
  1086. }
  1087. batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
  1088. ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
  1089. if (ret)
  1090. goto err;
  1091. seqno = i915_gem_next_request_seqno(ring);
  1092. for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
  1093. if (seqno < ring->sync_seqno[i]) {
  1094. /* The GPU can not handle its semaphore value wrapping,
  1095. * so every billion or so execbuffers, we need to stall
  1096. * the GPU in order to reset the counters.
  1097. */
  1098. ret = i915_gpu_idle(dev, true);
  1099. if (ret)
  1100. goto err;
  1101. BUG_ON(ring->sync_seqno[i]);
  1102. }
  1103. }
  1104. if (ring == &dev_priv->ring[RCS] &&
  1105. mode != dev_priv->relative_constants_mode) {
  1106. ret = intel_ring_begin(ring, 4);
  1107. if (ret)
  1108. goto err;
  1109. intel_ring_emit(ring, MI_NOOP);
  1110. intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
  1111. intel_ring_emit(ring, INSTPM);
  1112. intel_ring_emit(ring, mask << 16 | mode);
  1113. intel_ring_advance(ring);
  1114. dev_priv->relative_constants_mode = mode;
  1115. }
  1116. if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
  1117. ret = i915_reset_gen7_sol_offsets(dev, ring);
  1118. if (ret)
  1119. goto err;
  1120. }
  1121. trace_i915_gem_ring_dispatch(ring, seqno);
  1122. exec_start = batch_obj->gtt_offset + args->batch_start_offset;
  1123. exec_len = args->batch_len;
  1124. if (cliprects) {
  1125. for (i = 0; i < args->num_cliprects; i++) {
  1126. ret = i915_emit_box(dev, &cliprects[i],
  1127. args->DR1, args->DR4);
  1128. if (ret)
  1129. goto err;
  1130. ret = ring->dispatch_execbuffer(ring,
  1131. exec_start, exec_len);
  1132. if (ret)
  1133. goto err;
  1134. }
  1135. } else {
  1136. ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
  1137. if (ret)
  1138. goto err;
  1139. }
  1140. i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
  1141. i915_gem_execbuffer_retire_commands(dev, file, ring);
  1142. err:
  1143. eb_destroy(eb);
  1144. while (!list_empty(&objects)) {
  1145. struct drm_i915_gem_object *obj;
  1146. obj = list_first_entry(&objects,
  1147. struct drm_i915_gem_object,
  1148. exec_list);
  1149. list_del_init(&obj->exec_list);
  1150. drm_gem_object_unreference(&obj->base);
  1151. }
  1152. mutex_unlock(&dev->struct_mutex);
  1153. pre_mutex_err:
  1154. kfree(cliprects);
  1155. return ret;
  1156. }
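/*
 * Editor's note: a minimal userspace sketch of driving
 * i915_gem_do_execbuffer() above through DRM_IOCTL_I915_GEM_EXECBUFFER2,
 * assuming libdrm (i915_drm.h, xf86drm.h); the helper name, file
 * descriptor, object array and batch length are hypothetical. Note that
 * the batch buffer must be the last entry in the object array, since the
 * kernel takes the tail of the object list as the batch, and that
 * batch_start_offset/batch_len must be 8-byte aligned to pass
 * i915_gem_check_execbuffer().
 */
#include <stdint.h>
#include <string.h>
#include <i915_drm.h>
#include <xf86drm.h>

static int submit_batch(int fd, struct drm_i915_gem_exec_object2 *objs,
			uint32_t count, uint32_t batch_len)
{
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;	/* batch bo is objs[count - 1] */
	execbuf.buffer_count = count;
	execbuf.batch_start_offset = 0;		/* 8-byte aligned */
	execbuf.batch_len = batch_len;		/* 8-byte aligned */
	execbuf.flags = I915_EXEC_RENDER;	/* RCS; I915_EXEC_BSD/BLT also valid */

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}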
  1157. /*
  1158. * Legacy execbuffer just creates an exec2 list from the original exec object
  1159. * list array and passes it to the real function.
  1160. */
  1161. int
  1162. i915_gem_execbuffer(struct drm_device *dev, void *data,
  1163. struct drm_file *file)
  1164. {
  1165. struct drm_i915_gem_execbuffer *args = data;
  1166. struct drm_i915_gem_execbuffer2 exec2;
  1167. struct drm_i915_gem_exec_object *exec_list = NULL;
  1168. struct drm_i915_gem_exec_object2 *exec2_list = NULL;
  1169. int ret, i;
  1170. if (args->buffer_count < 1) {
  1171. DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
  1172. return -EINVAL;
  1173. }
  1174. /* Copy in the exec list from userland */
  1175. exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
  1176. exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
  1177. if (exec_list == NULL || exec2_list == NULL) {
  1178. DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
  1179. args->buffer_count);
  1180. drm_free_large(exec_list);
  1181. drm_free_large(exec2_list);
  1182. return -ENOMEM;
  1183. }
  1184. ret = copy_from_user(exec_list,
  1185. (struct drm_i915_relocation_entry __user *)
  1186. (uintptr_t) args->buffers_ptr,
  1187. sizeof(*exec_list) * args->buffer_count);
  1188. if (ret != 0) {
  1189. DRM_DEBUG("copy %d exec entries failed %d\n",
  1190. args->buffer_count, ret);
  1191. drm_free_large(exec_list);
  1192. drm_free_large(exec2_list);
  1193. return -EFAULT;
  1194. }
  1195. for (i = 0; i < args->buffer_count; i++) {
  1196. exec2_list[i].handle = exec_list[i].handle;
  1197. exec2_list[i].relocation_count = exec_list[i].relocation_count;
  1198. exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
  1199. exec2_list[i].alignment = exec_list[i].alignment;
  1200. exec2_list[i].offset = exec_list[i].offset;
  1201. if (INTEL_INFO(dev)->gen < 4)
  1202. exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
  1203. else
  1204. exec2_list[i].flags = 0;
  1205. }
  1206. exec2.buffers_ptr = args->buffers_ptr;
  1207. exec2.buffer_count = args->buffer_count;
  1208. exec2.batch_start_offset = args->batch_start_offset;
  1209. exec2.batch_len = args->batch_len;
  1210. exec2.DR1 = args->DR1;
  1211. exec2.DR4 = args->DR4;
  1212. exec2.num_cliprects = args->num_cliprects;
  1213. exec2.cliprects_ptr = args->cliprects_ptr;
  1214. exec2.flags = I915_EXEC_RENDER;
  1215. ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
  1216. if (!ret) {
  1217. /* Copy the new buffer offsets back to the user's exec list. */
  1218. for (i = 0; i < args->buffer_count; i++)
  1219. exec_list[i].offset = exec2_list[i].offset;
  1220. /* ... and back out to userspace */
  1221. ret = copy_to_user((struct drm_i915_relocation_entry __user *)
  1222. (uintptr_t) args->buffers_ptr,
  1223. exec_list,
  1224. sizeof(*exec_list) * args->buffer_count);
  1225. if (ret) {
  1226. ret = -EFAULT;
  1227. DRM_DEBUG("failed to copy %d exec entries "
  1228. "back to user (%d)\n",
  1229. args->buffer_count, ret);
  1230. }
  1231. }
  1232. drm_free_large(exec_list);
  1233. drm_free_large(exec2_list);
  1234. return ret;
  1235. }
  1236. int
  1237. i915_gem_execbuffer2(struct drm_device *dev, void *data,
  1238. struct drm_file *file)
  1239. {
  1240. struct drm_i915_gem_execbuffer2 *args = data;
  1241. struct drm_i915_gem_exec_object2 *exec2_list = NULL;
  1242. int ret;
  1243. if (args->buffer_count < 1) {
  1244. DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
  1245. return -EINVAL;
  1246. }
  1247. exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
  1248. GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
  1249. if (exec2_list == NULL)
  1250. exec2_list = drm_malloc_ab(sizeof(*exec2_list),
  1251. args->buffer_count);
  1252. if (exec2_list == NULL) {
  1253. DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
  1254. args->buffer_count);
  1255. return -ENOMEM;
  1256. }
  1257. ret = copy_from_user(exec2_list,
  1258. (struct drm_i915_relocation_entry __user *)
  1259. (uintptr_t) args->buffers_ptr,
  1260. sizeof(*exec2_list) * args->buffer_count);
  1261. if (ret != 0) {
  1262. DRM_DEBUG("copy %d exec entries failed %d\n",
  1263. args->buffer_count, ret);
  1264. drm_free_large(exec2_list);
  1265. return -EFAULT;
  1266. }
  1267. ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
  1268. if (!ret) {
  1269. /* Copy the new buffer offsets back to the user's exec list. */
  1270. ret = copy_to_user((struct drm_i915_relocation_entry __user *)
  1271. (uintptr_t) args->buffers_ptr,
  1272. exec2_list,
  1273. sizeof(*exec2_list) * args->buffer_count);
  1274. if (ret) {
  1275. ret = -EFAULT;
  1276. DRM_DEBUG("failed to copy %d exec entries "
  1277. "back to user (%d)\n",
  1278. args->buffer_count, ret);
  1279. }
  1280. }
  1281. drm_free_large(exec2_list);
  1282. return ret;
  1283. }