i915_gem_context.c

/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */
/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU enters
 * and exits RC6 (the GPU has its own internal power context, except on gen5).
 * Though something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                          refcount   pincount   active
 * S0: initial state                           0          0         0
 * S1: context created                         1          0         0
 * S2: context is currently running            2          1         X
 * S3: GPU referenced, but not current         2          0         1
 * S4: context is current, but destroyed       1          1         0
 * S5: like S3, but destroyed                  1          0         1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called on the current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context, but it is still active.
 *
 */
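
/*
 * Illustrative sketch (not part of the driver): roughly how a userspace client
 * would drive the life cycle described above, using the context create/destroy
 * ioctls implemented at the bottom of this file and the execbuffer2 context-id
 * helper from i915_drm.h. Error handling and the batch/exec object setup are
 * omitted; treat this as an assumption about typical usage, not a reference
 * implementation.
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	struct drm_i915_gem_context_destroy destroy = { 0 };
 *	struct drm_i915_gem_execbuffer2 execbuf = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);    // S0->S1
 *
 *	// ... fill in execbuf.buffers_ptr, buffer_count, batch_len ...
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);      // S1->S2
 *
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);  // S2->S4
 *	                                                  // (or S1->S0 if idle)
 */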
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define CONTEXT_ALIGN (64<<10)

static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
static int do_switch(struct i915_hw_context *to);

static int get_context_size(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
        u32 reg;

        switch (INTEL_INFO(dev)->gen) {
        case 6:
                reg = I915_READ(CXT_SIZE);
                ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
                break;
        case 7:
                reg = I915_READ(GEN7_CXT_SIZE);
                if (IS_HASWELL(dev))
                        ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
                else
                        ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
                break;
        default:
                BUG();
        }

        return ret;
}
void i915_gem_context_free(struct kref *ctx_ref)
{
        struct i915_hw_context *ctx = container_of(ctx_ref,
                                                   typeof(*ctx), ref);

        drm_gem_object_unreference(&ctx->obj->base);
        kfree(ctx);
}
static struct i915_hw_context *
create_hw_context(struct drm_device *dev,
                  struct drm_i915_file_private *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_context *ctx;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx == NULL)
                return ERR_PTR(-ENOMEM);

        kref_init(&ctx->ref);
        ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
        if (ctx->obj == NULL) {
                kfree(ctx);
                DRM_DEBUG_DRIVER("Context object allocation failed\n");
                return ERR_PTR(-ENOMEM);
        }

        if (INTEL_INFO(dev)->gen >= 7) {
                ret = i915_gem_object_set_cache_level(ctx->obj,
                                                      I915_CACHE_LLC_MLC);
                if (ret)
                        goto err_out;
        }

        /* The ring associated with the context object is handled by the normal
         * object tracking code. We give an initial ring value simply to pass
         * an assertion in the context switch code.
         */
        ctx->ring = &dev_priv->ring[RCS];

        /* Default context will never have a file_priv */
        if (file_priv == NULL)
                return ctx;

        ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
                        GFP_KERNEL);
        if (ret < 0)
                goto err_out;

        ctx->file_priv = file_priv;
        ctx->id = ret;

        return ctx;

err_out:
        i915_gem_context_unreference(ctx);
        return ERR_PTR(ret);
}
static inline bool is_default_context(struct i915_hw_context *ctx)
{
        return (ctx == ctx->ring->default_context);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores
 * the context state of the GPU for applications that don't utilize HW
 * contexts, as well as an idle case.
 */
static int create_default_context(struct drm_i915_private *dev_priv)
{
        struct i915_hw_context *ctx;
        int ret;

        BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

        ctx = create_hw_context(dev_priv->dev, NULL);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        /* We may need to do things with the shrinker which require us to
         * immediately switch back to the default context. This can cause a
         * problem as pinning the default context also requires GTT space
         * which may not be available. To avoid this we always pin the
         * default context.
         */
        dev_priv->ring[RCS].default_context = ctx;
        ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
        if (ret)
                goto err_destroy;

        ret = do_switch(ctx);
        if (ret)
                goto err_unpin;

        DRM_DEBUG_DRIVER("Default HW context loaded\n");
        return 0;

err_unpin:
        i915_gem_object_unpin(ctx->obj);
err_destroy:
        i915_gem_context_unreference(ctx);
        return ret;
}
void i915_gem_context_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!HAS_HW_CONTEXTS(dev)) {
                dev_priv->hw_contexts_disabled = true;
                return;
        }

        /* If called from reset, or thaw... we've been here already */
        if (dev_priv->hw_contexts_disabled ||
            dev_priv->ring[RCS].default_context)
                return;

        dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);

        if (dev_priv->hw_context_size > (1<<20)) {
                dev_priv->hw_contexts_disabled = true;
                return;
        }

        if (create_default_context(dev_priv)) {
                dev_priv->hw_contexts_disabled = true;
                return;
        }

        DRM_DEBUG_DRIVER("HW context support initialized\n");
}
void i915_gem_context_fini(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;

        if (dev_priv->hw_contexts_disabled)
                return;

        /* The only known way to stop the gpu from accessing the hw context is
         * to reset it. Do this as the very last operation to avoid confusing
         * other code, leading to spurious errors. */
        intel_gpu_reset(dev);

        i915_gem_object_unpin(dctx->obj);

        /* When the default context is created and switched to, the base object
         * refcount will be 2 (+1 from object creation and +1 from do_switch()).
         * i915_gem_context_fini() will be called after gpu_idle() has switched
         * to the default context. So we need to unreference the base object
         * once to offset the do_switch part, so that
         * i915_gem_context_unreference() can then free the base object
         * correctly. */
        drm_gem_object_unreference(&dctx->obj->base);
        i915_gem_context_unreference(dctx);
}
static int context_idr_cleanup(int id, void *p, void *data)
{
        struct i915_hw_context *ctx = p;

        BUG_ON(id == DEFAULT_CONTEXT_ID);

        i915_gem_context_unreference(ctx);
        return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        mutex_lock(&dev->struct_mutex);
        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
        idr_destroy(&file_priv->context_idr);
        mutex_unlock(&dev->struct_mutex);
}
static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
        return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
}
static inline int
mi_set_context(struct intel_ring_buffer *ring,
               struct i915_hw_context *new_context,
               u32 hw_flags)
{
        int ret;

        /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
         * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
         * explicitly, so we rely on the value at ring init, stored in
         * itlb_before_ctx_switch.
         */
        if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
                ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
                if (ret)
                        return ret;
        }

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        if (IS_GEN7(ring->dev))
                intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
        else
                intel_ring_emit(ring, MI_NOOP);

        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
        intel_ring_emit(ring, new_context->obj->gtt_offset |
                        MI_MM_SPACE_GTT |
                        MI_SAVE_EXT_STATE_EN |
                        MI_RESTORE_EXT_STATE_EN |
                        hw_flags);
        /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
        intel_ring_emit(ring, MI_NOOP);

        if (IS_GEN7(ring->dev))
                intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
        else
                intel_ring_emit(ring, MI_NOOP);

        intel_ring_advance(ring);

        return ret;
}
static int do_switch(struct i915_hw_context *to)
{
        struct intel_ring_buffer *ring = to->ring;
        struct drm_i915_gem_object *from_obj = ring->last_context_obj;
        u32 hw_flags = 0;
        int ret;

        BUG_ON(from_obj != NULL && from_obj->pin_count == 0);

        if (from_obj == to->obj)
                return 0;

        ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
        if (ret)
                return ret;

        /* Clear this page out of any CPU caches for coherent swap-in/out. Note
         * that thanks to write = false in this call and us not setting any gpu
         * write domains when putting a context object onto the active list
         * (when switching away from it), this won't block.
         * XXX: We need a real interface to do this instead of trickery. */
        ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
        if (ret) {
                i915_gem_object_unpin(to->obj);
                return ret;
        }

        if (!to->obj->has_global_gtt_mapping)
                i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);

        if (!to->is_initialized || is_default_context(to))
                hw_flags |= MI_RESTORE_INHIBIT;
        else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
                hw_flags |= MI_FORCE_RESTORE;

        ret = mi_set_context(ring, to, hw_flags);
        if (ret) {
                i915_gem_object_unpin(to->obj);
                return ret;
        }

        /* The backing object for the context is done after switching to the
         * *next* context. Therefore we cannot retire the previous context until
         * the next context has already started running. In fact, the below code
         * is a bit suboptimal because the retiring can occur simply after the
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from_obj != NULL) {
                from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
                i915_gem_object_move_to_active(from_obj, ring);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
                 * correct in case the object gets swapped out. Ideally we'd be
                 * able to defer doing this until we know the object would be
                 * swapped, but there is no way to do that yet.
                 */
                from_obj->dirty = 1;
                BUG_ON(from_obj->ring != ring);
                i915_gem_object_unpin(from_obj);

                drm_gem_object_unreference(&from_obj->base);
        }

        drm_gem_object_reference(&to->obj->base);
        ring->last_context_obj = to->obj;
        to->is_initialized = true;

        return 0;
}
/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @file: drm file associated with the context, may be NULL
 * @to_id: id of the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_ring_buffer *ring,
                        struct drm_file *file,
                        int to_id)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct i915_hw_context *to;

        if (dev_priv->hw_contexts_disabled)
                return 0;

        if (ring != &dev_priv->ring[RCS])
                return 0;

        if (to_id == DEFAULT_CONTEXT_ID) {
                to = ring->default_context;
        } else {
                if (file == NULL)
                        return -EINVAL;

                to = i915_gem_context_get(file->driver_priv, to_id);
                if (to == NULL)
                        return -ENOENT;
        }

        return do_switch(to);
}
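
/*
 * Illustrative sketch (an assumption about the caller, not code from this
 * file): the typical user of i915_switch_context() is the execbuffer path,
 * which pulls the context id out of the execbuffer2 ioctl arguments before
 * submitting the batch, roughly along these lines:
 *
 *	ctx_id = i915_execbuffer2_get_context_id(*args);
 *	...
 *	ret = i915_switch_context(ring, file, ctx_id);
 *	if (ret)
 *		goto err;
 *
 * A failed switch therefore aborts the submission before the batch is
 * dispatched, so a context is never made current half-way.
 */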
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_context_create *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_hw_context *ctx;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        if (dev_priv->hw_contexts_disabled)
                return -ENODEV;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ctx = create_hw_context(dev, file_priv);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        args->ctx_id = ctx->id;
        DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

        return 0;
}
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file)
{
        struct drm_i915_gem_context_destroy *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_hw_context *ctx;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ctx = i915_gem_context_get(file_priv, args->ctx_id);
        if (!ctx) {
                mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }

        idr_remove(&ctx->file_priv->context_idr, ctx->id);
        i915_gem_context_unreference(ctx);
        mutex_unlock(&dev->struct_mutex);

        DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
        return 0;
}