/* (scraper banner and line-number residue removed) */
  1. /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
  2. */
  3. /*
  4. * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  5. * All Rights Reserved.
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a
  8. * copy of this software and associated documentation files (the
  9. * "Software"), to deal in the Software without restriction, including
  10. * without limitation the rights to use, copy, modify, merge, publish,
  11. * distribute, sub license, and/or sell copies of the Software, and to
  12. * permit persons to whom the Software is furnished to do so, subject to
  13. * the following conditions:
  14. *
  15. * The above copyright notice and this permission notice (including the
  16. * next paragraph) shall be included in all copies or substantial portions
  17. * of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20. * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  21. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  22. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  23. * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  24. * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  25. * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26. *
  27. */
  28. #include "drmP.h"
  29. #include "drm.h"
  30. #include "i915_drm.h"
  31. #include "i915_drv.h"
/* Sequence number that can never match a real breadcrumb ("no pending ID"). */
#define MAX_NOPID ((u32)~0)

/** These are the interrupts used by the driver */
#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT |		\
				    I915_ASLE_INTERRUPT |		\
				    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
				    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
  38. void
  39. i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
  40. {
  41. if ((dev_priv->irq_mask_reg & mask) != 0) {
  42. dev_priv->irq_mask_reg &= ~mask;
  43. I915_WRITE(IMR, dev_priv->irq_mask_reg);
  44. (void) I915_READ(IMR);
  45. }
  46. }
  47. static inline void
  48. i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
  49. {
  50. if ((dev_priv->irq_mask_reg & mask) != mask) {
  51. dev_priv->irq_mask_reg |= mask;
  52. I915_WRITE(IMR, dev_priv->irq_mask_reg);
  53. (void) I915_READ(IMR);
  54. }
  55. }
  56. /**
  57. * i915_pipe_enabled - check if a pipe is enabled
  58. * @dev: DRM device
  59. * @pipe: pipe to check
  60. *
  61. * Reading certain registers when the pipe is disabled can hang the chip.
  62. * Use this routine to make sure the PLL is running and the pipe is active
  63. * before reading such registers if unsure.
  64. */
  65. static int
  66. i915_pipe_enabled(struct drm_device *dev, int pipe)
  67. {
  68. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  69. unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
  70. if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
  71. return 1;
  72. return 0;
  73. }
  74. /**
  75. * Emit blits for scheduled buffer swaps.
  76. *
  77. * This function will be called with the HW lock held.
  78. * Because this function must grab the ring mutex (dev->struct_mutex),
  79. * it can no longer run at soft irq time. We'll fix this when we do
  80. * the DRI2 swap buffer work.
  81. */
  82. static void i915_vblank_tasklet(struct drm_device *dev)
  83. {
  84. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  85. unsigned long irqflags;
  86. struct list_head *list, *tmp, hits, *hit;
  87. int nhits, nrects, slice[2], upper[2], lower[2], i;
  88. unsigned counter[2];
  89. struct drm_drawable_info *drw;
  90. drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
  91. u32 cpp = dev_priv->cpp;
  92. u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
  93. XY_SRC_COPY_BLT_WRITE_ALPHA |
  94. XY_SRC_COPY_BLT_WRITE_RGB)
  95. : XY_SRC_COPY_BLT_CMD;
  96. u32 src_pitch = sarea_priv->pitch * cpp;
  97. u32 dst_pitch = sarea_priv->pitch * cpp;
  98. u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
  99. RING_LOCALS;
  100. mutex_lock(&dev->struct_mutex);
  101. if (IS_I965G(dev) && sarea_priv->front_tiled) {
  102. cmd |= XY_SRC_COPY_BLT_DST_TILED;
  103. dst_pitch >>= 2;
  104. }
  105. if (IS_I965G(dev) && sarea_priv->back_tiled) {
  106. cmd |= XY_SRC_COPY_BLT_SRC_TILED;
  107. src_pitch >>= 2;
  108. }
  109. counter[0] = drm_vblank_count(dev, 0);
  110. counter[1] = drm_vblank_count(dev, 1);
  111. DRM_DEBUG("\n");
  112. INIT_LIST_HEAD(&hits);
  113. nhits = nrects = 0;
  114. spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
  115. /* Find buffer swaps scheduled for this vertical blank */
  116. list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
  117. drm_i915_vbl_swap_t *vbl_swap =
  118. list_entry(list, drm_i915_vbl_swap_t, head);
  119. int pipe = vbl_swap->pipe;
  120. if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
  121. continue;
  122. list_del(list);
  123. dev_priv->swaps_pending--;
  124. drm_vblank_put(dev, pipe);
  125. spin_unlock(&dev_priv->swaps_lock);
  126. spin_lock(&dev->drw_lock);
  127. drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
  128. list_for_each(hit, &hits) {
  129. drm_i915_vbl_swap_t *swap_cmp =
  130. list_entry(hit, drm_i915_vbl_swap_t, head);
  131. struct drm_drawable_info *drw_cmp =
  132. drm_get_drawable_info(dev, swap_cmp->drw_id);
  133. /* Make sure both drawables are still
  134. * around and have some rectangles before
  135. * we look inside to order them for the
  136. * blts below.
  137. */
  138. if (drw_cmp && drw_cmp->num_rects > 0 &&
  139. drw && drw->num_rects > 0 &&
  140. drw_cmp->rects[0].y1 > drw->rects[0].y1) {
  141. list_add_tail(list, hit);
  142. break;
  143. }
  144. }
  145. spin_unlock(&dev->drw_lock);
  146. /* List of hits was empty, or we reached the end of it */
  147. if (hit == &hits)
  148. list_add_tail(list, hits.prev);
  149. nhits++;
  150. spin_lock(&dev_priv->swaps_lock);
  151. }
  152. if (nhits == 0) {
  153. spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
  154. mutex_unlock(&dev->struct_mutex);
  155. return;
  156. }
  157. spin_unlock(&dev_priv->swaps_lock);
  158. i915_kernel_lost_context(dev);
  159. if (IS_I965G(dev)) {
  160. BEGIN_LP_RING(4);
  161. OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
  162. OUT_RING(0);
  163. OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
  164. OUT_RING(0);
  165. ADVANCE_LP_RING();
  166. } else {
  167. BEGIN_LP_RING(6);
  168. OUT_RING(GFX_OP_DRAWRECT_INFO);
  169. OUT_RING(0);
  170. OUT_RING(0);
  171. OUT_RING(sarea_priv->width | sarea_priv->height << 16);
  172. OUT_RING(sarea_priv->width | sarea_priv->height << 16);
  173. OUT_RING(0);
  174. ADVANCE_LP_RING();
  175. }
  176. sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
  177. upper[0] = upper[1] = 0;
  178. slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
  179. slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
  180. lower[0] = sarea_priv->pipeA_y + slice[0];
  181. lower[1] = sarea_priv->pipeB_y + slice[0];
  182. spin_lock(&dev->drw_lock);
  183. /* Emit blits for buffer swaps, partitioning both outputs into as many
  184. * slices as there are buffer swaps scheduled in order to avoid tearing
  185. * (based on the assumption that a single buffer swap would always
  186. * complete before scanout starts).
  187. */
  188. for (i = 0; i++ < nhits;
  189. upper[0] = lower[0], lower[0] += slice[0],
  190. upper[1] = lower[1], lower[1] += slice[1]) {
  191. if (i == nhits)
  192. lower[0] = lower[1] = sarea_priv->height;
  193. list_for_each(hit, &hits) {
  194. drm_i915_vbl_swap_t *swap_hit =
  195. list_entry(hit, drm_i915_vbl_swap_t, head);
  196. struct drm_clip_rect *rect;
  197. int num_rects, pipe;
  198. unsigned short top, bottom;
  199. drw = drm_get_drawable_info(dev, swap_hit->drw_id);
  200. /* The drawable may have been destroyed since
  201. * the vblank swap was queued
  202. */
  203. if (!drw)
  204. continue;
  205. rect = drw->rects;
  206. pipe = swap_hit->pipe;
  207. top = upper[pipe];
  208. bottom = lower[pipe];
  209. for (num_rects = drw->num_rects; num_rects--; rect++) {
  210. int y1 = max(rect->y1, top);
  211. int y2 = min(rect->y2, bottom);
  212. if (y1 >= y2)
  213. continue;
  214. BEGIN_LP_RING(8);
  215. OUT_RING(cmd);
  216. OUT_RING(ropcpp | dst_pitch);
  217. OUT_RING((y1 << 16) | rect->x1);
  218. OUT_RING((y2 << 16) | rect->x2);
  219. OUT_RING(sarea_priv->front_offset);
  220. OUT_RING((y1 << 16) | rect->x1);
  221. OUT_RING(src_pitch);
  222. OUT_RING(sarea_priv->back_offset);
  223. ADVANCE_LP_RING();
  224. }
  225. }
  226. }
  227. spin_unlock_irqrestore(&dev->drw_lock, irqflags);
  228. mutex_unlock(&dev->struct_mutex);
  229. list_for_each_safe(hit, tmp, &hits) {
  230. drm_i915_vbl_swap_t *swap_hit =
  231. list_entry(hit, drm_i915_vbl_swap_t, head);
  232. list_del(hit);
  233. drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
  234. }
  235. }
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	/* Reading the frame counter of a disabled pipe can hang the chip
	 * (see i915_pipe_enabled), so bail out with 0 instead. */
	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	/* Combine the stable high bits with the low bits into the
	 * 24-bit frame count (see max_vblank_count in postinstall). */
	count = (high1 << 8) | low;
	return count;
}
/*
 * Workqueue handler that runs the vblank swap tasklet.
 *
 * The tasklet needs the HW lock; if another context holds it, we arrange
 * for the lock holder to run the tasklet on unlock instead of blocking.
 */
void
i915_vblank_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    vblank_work);
	struct drm_device *dev = dev_priv->dev;
	unsigned long irqflags;

	/* No HW lock exists (e.g. no DRI client) — run directly. */
	if (dev->lock.hw_lock == NULL) {
		i915_vblank_tasklet(dev);
		return;
	}

	/* Publish the tasklet so a current lock holder will run it
	 * for us when it drops the lock. */
	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
	dev->locked_tasklet_func = i915_vblank_tasklet;
	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

	/* Try to get the lock now, if this fails, the lock
	 * holder will execute the tasklet during unlock
	 */
	if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
		return;

	dev->lock.lock_time = jiffies;
	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);

	/* We got the lock ourselves: retract the published tasklet
	 * (so it isn't run twice) and execute it here. */
	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
	dev->locked_tasklet_func = NULL;
	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

	i915_vblank_tasklet(dev);
	drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}
/*
 * Main interrupt handler: acks and dispatches pipe events, user
 * interrupts and ASLE events. Register ack ordering here is
 * deliberate — PIPExSTAT before IIR, IIR before the final flush.
 */
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir;
	u32 pipea_stats, pipeb_stats;
	int vblank = 0;

	atomic_inc(&dev_priv->irq_received);

	/* With MSI, mask everything while we handle this interrupt. */
	if (dev->pdev->msi_enabled)
		I915_WRITE(IMR, ~0);
	iir = I915_READ(IIR);

	/* Nothing pending — restore the mask and report not-ours. */
	if (iir == 0) {
		if (dev->pdev->msi_enabled) {
			I915_WRITE(IMR, dev_priv->irq_mask_reg);
			(void) I915_READ(IMR);
		}
		return IRQ_NONE;
	}

	/*
	 * Clear the PIPE(A|B)STAT regs before the IIR otherwise
	 * we may get extra interrupts.
	 */
	if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
		pipea_stats = I915_READ(PIPEASTAT);

		/* The vblank interrupt gets enabled even if we didn't ask
		 * for it, so make sure it's shut down again. */
		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
			pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
					 PIPE_VBLANK_INTERRUPT_ENABLE);
		else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
					PIPE_VBLANK_INTERRUPT_STATUS)) {
			vblank++;
			drm_handle_vblank(dev, 0);
		}

		I915_WRITE(PIPEASTAT, pipea_stats);
	}
	if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
		pipeb_stats = I915_READ(PIPEBSTAT);
		/* Ack the event */
		I915_WRITE(PIPEBSTAT, pipeb_stats);

		/* The vblank interrupt gets enabled even if we didn't ask for
		   it, so make sure it's shut down again */
		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
			pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
					 PIPE_VBLANK_INTERRUPT_ENABLE);
		else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
					PIPE_VBLANK_INTERRUPT_STATUS)) {
			vblank++;
			drm_handle_vblank(dev, 1);
		}

		if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
			opregion_asle_intr(dev);

		/* NOTE(review): PIPEBSTAT is written twice (early ack above
		 * plus this write of the possibly-modified value) while
		 * PIPEASTAT is written only once — confirm the asymmetry
		 * is intentional. */
		I915_WRITE(PIPEBSTAT, pipeb_stats);
	}

	I915_WRITE(IIR, iir);
	if (dev->pdev->msi_enabled)
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IIR); /* Flush posted writes */

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch =
			READ_BREADCRUMB(dev_priv);

	/* A breadcrumb was written — wake anyone in i915_wait_irq(). */
	if (iir & I915_USER_INTERRUPT) {
		dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
		DRM_WAKEUP(&dev_priv->irq_queue);
	}

	if (iir & I915_ASLE_INTERRUPT)
		opregion_asle_intr(dev);

	/* Scheduled swaps are emitted from process context (see
	 * i915_vblank_work_handler), not here. */
	if (vblank && dev_priv->swaps_pending > 0)
		schedule_work(&dev_priv->vblank_work);

	return IRQ_HANDLED;
}
/*
 * Emit a breadcrumb store plus a user-interrupt command into the ring.
 * Returns the new breadcrumb value; i915_wait_irq() waits for it.
 * Caller must hold dev->struct_mutex (touches the ring).
 */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("\n");

	/* Breadcrumbs are positive; wrap back to 1 before overflowing
	 * the signed 32-bit comparison in i915_wait_irq(). */
	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

	/* Store the counter into hw status page slot 5, then raise
	 * MI_USER_INTERRUPT once the store has executed. */
	BEGIN_LP_RING(6);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}
  384. void i915_user_irq_get(struct drm_device *dev)
  385. {
  386. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  387. unsigned long irqflags;
  388. spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
  389. if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
  390. i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
  391. spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
  392. }
  393. void i915_user_irq_put(struct drm_device *dev)
  394. {
  395. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  396. unsigned long irqflags;
  397. spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
  398. BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
  399. if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
  400. i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
  401. spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
  402. }
  403. static int i915_wait_irq(struct drm_device * dev, int irq_nr)
  404. {
  405. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  406. int ret = 0;
  407. DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
  408. READ_BREADCRUMB(dev_priv));
  409. if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
  410. if (dev_priv->sarea_priv) {
  411. dev_priv->sarea_priv->last_dispatch =
  412. READ_BREADCRUMB(dev_priv);
  413. }
  414. return 0;
  415. }
  416. if (dev_priv->sarea_priv)
  417. dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
  418. i915_user_irq_get(dev);
  419. DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
  420. READ_BREADCRUMB(dev_priv) >= irq_nr);
  421. i915_user_irq_put(dev);
  422. if (ret == -EBUSY) {
  423. DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
  424. READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
  425. }
  426. if (dev_priv->sarea_priv)
  427. dev_priv->sarea_priv->last_dispatch =
  428. READ_BREADCRUMB(dev_priv);
  429. return ret;
  430. }
  431. /* Needs the lock as it touches the ring.
  432. */
  433. int i915_irq_emit(struct drm_device *dev, void *data,
  434. struct drm_file *file_priv)
  435. {
  436. drm_i915_private_t *dev_priv = dev->dev_private;
  437. drm_i915_irq_emit_t *emit = data;
  438. int result;
  439. RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
  440. if (!dev_priv) {
  441. DRM_ERROR("called with no initialization\n");
  442. return -EINVAL;
  443. }
  444. mutex_lock(&dev->struct_mutex);
  445. result = i915_emit_irq(dev);
  446. mutex_unlock(&dev->struct_mutex);
  447. if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
  448. DRM_ERROR("copy_to_user\n");
  449. return -EFAULT;
  450. }
  451. return 0;
  452. }
  453. /* Doesn't need the hardware lock.
  454. */
  455. int i915_irq_wait(struct drm_device *dev, void *data,
  456. struct drm_file *file_priv)
  457. {
  458. drm_i915_private_t *dev_priv = dev->dev_private;
  459. drm_i915_irq_wait_t *irqwait = data;
  460. if (!dev_priv) {
  461. DRM_ERROR("called with no initialization\n");
  462. return -EINVAL;
  463. }
  464. return i915_wait_irq(dev, irqwait->irq_seq);
  465. }
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 pipestat_reg = 0;
	u32 pipestat;
	u32 interrupt = 0;
	unsigned long irqflags;

	switch (pipe) {
	case 0:
		pipestat_reg = PIPEASTAT;
		interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
		break;
	case 1:
		pipestat_reg = PIPEBSTAT;
		interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
		break;
	default:
		DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
			  pipe);
		return 0;
	}

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	/* Enabling vblank events in IMR comes before PIPESTAT write, or
	 * there's a race where the PIPESTAT vblank bit gets set to 1, so
	 * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in
	 * ISR flashes to 1, but the IIR bit doesn't get set to 1 because
	 * IMR masks it.  It doesn't ever get set after we clear the masking
	 * in IMR because the ISR bit is edge, not level-triggered, on the
	 * OR of PIPESTAT bits.
	 */
	i915_enable_irq(dev_priv, interrupt);
	pipestat = I915_READ(pipestat_reg);
	/* 965 generates the "start of vblank" event; older parts only
	 * have the plain vblank event. */
	if (IS_I965G(dev))
		pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
	else
		pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
	/* Clear any stale interrupt status (status bits are
	 * write-1-to-clear). */
	pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
		     PIPE_VBLANK_INTERRUPT_STATUS);
	I915_WRITE(pipestat_reg, pipestat);
	(void) I915_READ(pipestat_reg);	/* Posting read */
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

	return 0;
}
  513. /* Called from drm generic code, passed 'crtc' which
  514. * we use as a pipe index
  515. */
  516. void i915_disable_vblank(struct drm_device *dev, int pipe)
  517. {
  518. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  519. u32 pipestat_reg = 0;
  520. u32 pipestat;
  521. u32 interrupt = 0;
  522. unsigned long irqflags;
  523. switch (pipe) {
  524. case 0:
  525. pipestat_reg = PIPEASTAT;
  526. interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
  527. break;
  528. case 1:
  529. pipestat_reg = PIPEBSTAT;
  530. interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
  531. break;
  532. default:
  533. DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
  534. pipe);
  535. return;
  536. break;
  537. }
  538. spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
  539. i915_disable_irq(dev_priv, interrupt);
  540. pipestat = I915_READ(pipestat_reg);
  541. pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
  542. PIPE_VBLANK_INTERRUPT_ENABLE);
  543. /* Clear any stale interrupt status */
  544. pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
  545. PIPE_VBLANK_INTERRUPT_STATUS);
  546. I915_WRITE(pipestat_reg, pipestat);
  547. (void) I915_READ(pipestat_reg); /* Posting read */
  548. spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
  549. }
  550. /* Set the vblank monitor pipe
  551. */
  552. int i915_vblank_pipe_set(struct drm_device *dev, void *data,
  553. struct drm_file *file_priv)
  554. {
  555. drm_i915_private_t *dev_priv = dev->dev_private;
  556. if (!dev_priv) {
  557. DRM_ERROR("called with no initialization\n");
  558. return -EINVAL;
  559. }
  560. return 0;
  561. }
  562. int i915_vblank_pipe_get(struct drm_device *dev, void *data,
  563. struct drm_file *file_priv)
  564. {
  565. drm_i915_private_t *dev_priv = dev->dev_private;
  566. drm_i915_vblank_pipe_t *pipe = data;
  567. if (!dev_priv) {
  568. DRM_ERROR("called with no initialization\n");
  569. return -EINVAL;
  570. }
  571. pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
  572. return 0;
  573. }
/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_swap_t *swap = data;
	drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
	unsigned int pipe, seqtype, curseq;
	unsigned long irqflags;
	struct list_head *list;
	int ret;

	if (!dev_priv || !dev_priv->sarea_priv) {
		DRM_ERROR("%s called with no initialization\n", __func__);
		return -EINVAL;
	}

	/* The blit path below can't handle rotated framebuffers. */
	if (dev_priv->sarea_priv->rotation) {
		DRM_DEBUG("Rotation not supported\n");
		return -EINVAL;
	}

	if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
			     _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
		DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
		return -EINVAL;
	}

	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;

	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
		DRM_ERROR("Invalid pipe %d\n", pipe);
		return -EINVAL;
	}

	/* Validate the drawable under drw_lock; it may still go away
	 * later, which the tasklet handles. */
	spin_lock_irqsave(&dev->drw_lock, irqflags);

	if (!drm_get_drawable_info(dev, swap->drawable)) {
		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
		DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
		return -EINVAL;
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	/*
	 * We take the ref here and put it when the swap actually completes
	 * in the tasklet.
	 */
	ret = drm_vblank_get(dev, pipe);
	if (ret)
		return ret;
	curseq = drm_vblank_count(dev, pipe);

	if (seqtype == _DRM_VBLANK_RELATIVE)
		swap->sequence += curseq;

	/* Target already missed (wrap-safe comparison)? */
	if ((curseq - swap->sequence) <= (1<<23)) {
		if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
			swap->sequence = curseq + 1;
		} else {
			DRM_DEBUG("Missed target sequence\n");
			drm_vblank_put(dev, pipe);
			return -EINVAL;
		}
	}

	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

	if (!vbl_swap) {
		DRM_ERROR("Failed to allocate memory to queue swap\n");
		drm_vblank_put(dev, pipe);
		return -ENOMEM;
	}

	vbl_swap->drw_id = swap->drawable;
	vbl_swap->pipe = pipe;
	vbl_swap->sequence = swap->sequence;

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	/* Drop duplicate requests (same drawable/pipe/sequence). */
	list_for_each(list, &dev_priv->vbl_swaps.head) {
		vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);

		if (vbl_old->drw_id == swap->drawable &&
		    vbl_old->pipe == pipe &&
		    vbl_old->sequence == swap->sequence) {
			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
			drm_vblank_put(dev, pipe);
			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
			DRM_DEBUG("Already scheduled\n");
			return 0;
		}
	}

	/* Hard cap on the queue to bound tasklet work per vblank. */
	if (dev_priv->swaps_pending >= 10) {
		DRM_DEBUG("Too many swaps queued\n");
		DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
			  drm_vblank_count(dev, 0),
			  drm_vblank_count(dev, 1));

		list_for_each(list, &dev_priv->vbl_swaps.head) {
			vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
			DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
				  vbl_old->drw_id, vbl_old->pipe,
				  vbl_old->sequence);
		}
		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
		drm_vblank_put(dev, pipe);
		drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
		return -EBUSY;
	}

	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending++;

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	return 0;
}
/* drm_dma.h hooks
 */
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	/* Quiesce the hardware before the IRQ line is hooked up:
	 * hide interrupts from the status page, mask everything in
	 * IMR, and disable all interrupt generation via IER. */
	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
}
/*
 * Finish IRQ setup once the handler is installed: initialize the swap
 * queue, register with the DRM vblank core, program IMR/IER and enable
 * ASLE notifications. Returns 0 or the drm_vblank_init() error.
 */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret, num_pipes = 2;

	spin_lock_init(&dev_priv->swaps_lock);
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
	dev_priv->swaps_pending = 0;

	/* Set initial unmasked IRQs to just the selected vblank pipes. */
	dev_priv->irq_mask_reg = ~0;

	ret = drm_vblank_init(dev, num_pipes);
	if (ret)
		return ret;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */

	/* Never unmask anything outside the driver's enable set. */
	dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;

	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
	(void) I915_READ(IER);	/* posting read */

	opregion_enable_asle(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	return 0;
}
/*
 * Tear down interrupt delivery: mask and disable everything, then
 * ack any status still latched in PIPExSTAT and IIR (write-1-to-clear)
 * so nothing fires after the handler is removed.
 */
void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 temp;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	/* Clear latched status by writing back what we read. */
	temp = I915_READ(PIPEASTAT);
	I915_WRITE(PIPEASTAT, temp);
	temp = I915_READ(PIPEBSTAT);
	I915_WRITE(PIPEBSTAT, temp);
	temp = I915_READ(IIR);
	I915_WRITE(IIR, temp);
}