i915_gem_execbuffer.c

/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
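
/*
 * eb_objects is a small hash table mapping userspace buffer handles to the
 * drm_i915_gem_object instances looked up for this execbuffer call, so that
 * each relocation can resolve its target handle without repeating the
 * handle lookup. eb_create() starts from half a page worth of buckets
 * (PAGE_SIZE / sizeof(struct hlist_head) / 2) and halves that power-of-two
 * count down towards the number of buffers in the execbuffer; eb->and is
 * the resulting bucket mask.
 */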
struct eb_objects {
	int and;
	struct hlist_head buckets[0];
};

static struct eb_objects *
eb_create(int size)
{
	struct eb_objects *eb;
	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;

	BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
	while (count > size)
		count >>= 1;
	eb = kzalloc(count*sizeof(struct hlist_head) +
		     sizeof(struct eb_objects),
		     GFP_KERNEL);
	if (eb == NULL)
		return eb;

	eb->and = count - 1;
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
	hlist_add_head(&obj->exec_node,
		       &eb->buckets[obj->exec_handle & eb->and]);
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct drm_i915_gem_object *obj;

	head = &eb->buckets[handle & eb->and];
	hlist_for_each(node, head) {
		obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
		if (obj->exec_handle == handle)
			return obj;
	}

	return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}
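
/*
 * Apply a single relocation: patch the dword at reloc->offset inside @obj
 * so that it points at the target object's current GTT offset plus
 * reloc->delta. The write goes through the CPU domain when that is cheap
 * (see use_cpu_reloc()), otherwise through an atomic GTT mapping, and the
 * user-visible presumed_offset is updated on success.
 */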
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = target_i915_obj->gtt_offset;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non-secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
		     reloc->write_domain != target_obj->pending_write_domain)) {
		DRM_DEBUG("Write domain conflict: "
			  "obj %p target %d offset %d "
			  "new %08x old %08x\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->write_domain,
			  target_obj->pending_write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj)) {
		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
		char *vaddr;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			return ret;

		vaddr = kmap_atomic(i915_gem_object_get_page(obj,
							     reloc->offset >> PAGE_SHIFT));
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform. */
		reloc->offset += obj->gtt_offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + (reloc->offset & ~PAGE_MASK));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}
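
/*
 * Fast-path relocation processing for one object: relocation entries are
 * copied from userspace in small chunks onto the stack (pagefaults are
 * disabled by the caller, hence the _inatomic copies) and applied one by
 * one, writing any changed presumed_offset straight back to userspace. The
 * 512-byte stack buffer holds 16 of the 32-byte relocation entries per
 * chunk.
 */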
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
			     struct eb_objects *eb,
			     struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to acquire
	 * the struct mutex again. Obviously this is bad and so lockdep
	 * complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(obj);
}
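
/*
 * Pin a single object into the GTT for this execbuffer, honouring the
 * alignment, fence and mappability constraints recorded in its exec entry,
 * and set up a fence register and an aliasing-PPGTT mapping where required.
 * The pin (and any fence pin) is recorded in entry->flags so it can be
 * dropped again by i915_gem_execbuffer_unreserve_object().
 */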
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(obj);

	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	entry->offset = obj->gtt_offset;
	return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!obj->gtt_space)
		return;

	entry = obj->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
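
/*
 * Reserve GTT space for every object in the execbuffer. Objects that must
 * be mappable (fenced, or relocated through the aperture) are sorted to the
 * front of the list so they are bound first, and the whole pass is retried
 * once after evicting everything if the GTT is too fragmented to fit the
 * request.
 */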
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct drm_file *file,
			    struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct list_head ordered_objects;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;

			if (!obj->gtt_space)
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(obj);

			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_gem_object_unbind(obj);
			else
				ret = i915_gem_execbuffer_reserve_object(obj, ring);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (obj->gtt_space)
				continue;

			ret = i915_gem_execbuffer_reserve_object(obj, ring);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list)
			i915_gem_execbuffer_unreserve_object(obj);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_everything(ring->dev);
		if (ret)
			return ret;
	} while (1);
}
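
/*
 * Slow-path relocation: used when the atomic fast path faulted on user
 * memory. All relocation entries are copied into kernel memory with the
 * struct mutex dropped (so the copy may fault and sleep), the user-visible
 * presumed offsets are invalidated, and then the objects are looked up,
 * reserved and relocated again from the kernel copy with the lock reheld.
 */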
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct list_head *objects,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  int count)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	int *reloc_offset;
	int i, total, ret;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(objects)) {
		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	for (i = 0; i < count; i++) {
		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		list_add_tail(&obj->exec_list, objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	ret = i915_gem_execbuffer_reserve(ring, file, objects);
	if (ret)
		goto err;

	list_for_each_entry(obj, objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}
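
/*
 * If any object referenced by the batch still has a pageflip pending, emit
 * MI_WAIT_FOR_EVENT commands so that the GPU itself waits for the flips to
 * complete instead of stalling the CPU before dispatch.
 */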
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
	u32 plane, flip_mask;
	int ret;

	/* Check for any pending flips. As we only maintain a flip queue depth
	 * of 1, we can simply insert a WAIT for the next display flip prior
	 * to executing the batch and avoid stalling the CPU.
	 */
	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;

		if (plane)
			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
		else
			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}

	return 0;
}
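
/*
 * Prepare all objects for execution on @ring: synchronise against work
 * queued on other rings, flush CPU writes (clflush and chipset flush), wait
 * for any pending pageflips and finally invalidate the GPU caches so the
 * batch sees coherent data.
 */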
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	uint32_t flush_domains = 0;
	uint32_t flips = 0;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj);

		if (obj->base.pending_write_domain)
			flips |= atomic_read(&obj->pending_flip);

		flush_domains |= obj->base.write_domain;
	}

	if (flips) {
		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
		if (ret)
			return ret;
	}

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
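
/*
 * Sanity-check the user-supplied exec list before taking any locks: guard
 * against relocation counts that would overflow the later allocations and
 * make sure each relocation array is readable (and writable, since the
 * presumed offsets may be written back).
 */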
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		int length; /* limited by fault_in_pages_readable() */

		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
			return -EINVAL;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		if (!access_ok(VERIFY_READ, ptr, length))
			return -EFAULT;

		/* we may also need to update the presumed offsets */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (fault_in_multipages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}
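
/*
 * Commit the pending read/write domains computed during relocation, move
 * every object onto the ring's active list and, for written objects, record
 * the request seqno and mark them dirty (and any pinned scanout as busy).
 */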
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.read_domains = obj->base.pending_read_domains;
		obj->base.write_domain = obj->base.pending_write_domain;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)i915_add_request(ring, file, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}
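
/*
 * Core execbuffer path shared by both ioctls: validate the arguments,
 * select the target ring, look up and reserve every object, apply the
 * relocations (falling back to the slow path on -EFAULT), flush the objects
 * into the GPU domain and dispatch the batch, finally retiring it with a
 * request so that completion can be tracked.
 */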
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head objects;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask;
	u32 flags;
	int ret, mode, i;

	if (!i915_gem_check_execbuffer(args)) {
		DRM_DEBUG("execbuf with invalid offset/length\n");
		return -EINVAL;
	}

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning of this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   (struct drm_clip_rect __user *)(uintptr_t)
				   args->cliprects_ptr,
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args->buffer_count);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	INIT_LIST_HEAD(&objects);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			/* prevent error path from reading uninitialized data */
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->exec_list)) {
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		list_add_tail(&obj->exec_list, &objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
								&objects, eb,
								exec,
								args->buffer_count);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but let's be paranoid and do it
	 * unconditionally for now. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&objects, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
	eb_destroy(eb);
	while (!list_empty(&objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (void __user *)(uintptr_t)args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}
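
/*
 * execbuffer2 ioctl entry point: copy the exec object list in from
 * userspace, run the common execbuffer path and, on success, write the
 * final buffer offsets back so userspace can use them as presumed offsets
 * on its next submission.
 */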
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}