  1. /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
  2. */
  3. /*
  4. * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  5. * All Rights Reserved.
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a
  8. * copy of this software and associated documentation files (the
  9. * "Software"), to deal in the Software without restriction, including
  10. * without limitation the rights to use, copy, modify, merge, publish,
  11. * distribute, sub license, and/or sell copies of the Software, and to
  12. * permit persons to whom the Software is furnished to do so, subject to
  13. * the following conditions:
  14. *
  15. * The above copyright notice and this permission notice (including the
  16. * next paragraph) shall be included in all copies or substantial portions
  17. * of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20. * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  21. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  22. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  23. * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  24. * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  25. * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26. *
  27. */
  28. #include "drmP.h"
  29. #include "drm.h"
  30. #include "i915_drm.h"
  31. #include "i915_drv.h"
  32. #define MAX_NOPID ((u32)~0)
  33. /** These are the interrupts used by the driver */
  34. #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
  35. I915_ASLE_INTERRUPT | \
  36. I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
  37. I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
  38. void
  39. i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
  40. {
  41. if ((dev_priv->irq_mask_reg & mask) != 0) {
  42. dev_priv->irq_mask_reg &= ~mask;
  43. I915_WRITE(IMR, dev_priv->irq_mask_reg);
  44. (void) I915_READ(IMR);
  45. }
  46. }
  47. static inline void
  48. i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
  49. {
  50. if ((dev_priv->irq_mask_reg & mask) != mask) {
  51. dev_priv->irq_mask_reg |= mask;
  52. I915_WRITE(IMR, dev_priv->irq_mask_reg);
  53. (void) I915_READ(IMR);
  54. }
  55. }
  56. /**
  57. * i915_get_pipe - return the the pipe associated with a given plane
  58. * @dev: DRM device
  59. * @plane: plane to look for
  60. *
  61. * The Intel Mesa & 2D drivers call the vblank routines with a plane number
  62. * rather than a pipe number, since they may not always be equal. This routine
  63. * maps the given @plane back to a pipe number.
  64. */
  65. static int
  66. i915_get_pipe(struct drm_device *dev, int plane)
  67. {
  68. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  69. u32 dspcntr;
  70. dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
  71. return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
  72. }
  73. /**
  74. * i915_get_plane - return the the plane associated with a given pipe
  75. * @dev: DRM device
  76. * @pipe: pipe to look for
  77. *
  78. * The Intel Mesa & 2D drivers call the vblank routines with a plane number
  79. * rather than a plane number, since they may not always be equal. This routine
  80. * maps the given @pipe back to a plane number.
  81. */
  82. static int
  83. i915_get_plane(struct drm_device *dev, int pipe)
  84. {
  85. if (i915_get_pipe(dev, 0) == pipe)
  86. return 0;
  87. return 1;
  88. }
  89. /**
  90. * i915_pipe_enabled - check if a pipe is enabled
  91. * @dev: DRM device
  92. * @pipe: pipe to check
  93. *
  94. * Reading certain registers when the pipe is disabled can hang the chip.
  95. * Use this routine to make sure the PLL is running and the pipe is active
  96. * before reading such registers if unsure.
  97. */
  98. static int
  99. i915_pipe_enabled(struct drm_device *dev, int pipe)
  100. {
  101. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  102. unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
  103. if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
  104. return 1;
  105. return 0;
  106. }
  107. /**
  108. * Emit blits for scheduled buffer swaps.
  109. *
  110. * This function will be called with the HW lock held.
  111. */
  112. static void i915_vblank_tasklet(struct drm_device *dev)
  113. {
  114. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  115. unsigned long irqflags;
  116. struct list_head *list, *tmp, hits, *hit;
  117. int nhits, nrects, slice[2], upper[2], lower[2], i;
  118. unsigned counter[2];
  119. struct drm_drawable_info *drw;
  120. drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
  121. u32 cpp = dev_priv->cpp;
  122. u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
  123. XY_SRC_COPY_BLT_WRITE_ALPHA |
  124. XY_SRC_COPY_BLT_WRITE_RGB)
  125. : XY_SRC_COPY_BLT_CMD;
  126. u32 src_pitch = sarea_priv->pitch * cpp;
  127. u32 dst_pitch = sarea_priv->pitch * cpp;
  128. u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
  129. RING_LOCALS;
  130. if (IS_I965G(dev) && sarea_priv->front_tiled) {
  131. cmd |= XY_SRC_COPY_BLT_DST_TILED;
  132. dst_pitch >>= 2;
  133. }
  134. if (IS_I965G(dev) && sarea_priv->back_tiled) {
  135. cmd |= XY_SRC_COPY_BLT_SRC_TILED;
  136. src_pitch >>= 2;
  137. }
  138. counter[0] = drm_vblank_count(dev, 0);
  139. counter[1] = drm_vblank_count(dev, 1);
  140. DRM_DEBUG("\n");
  141. INIT_LIST_HEAD(&hits);
  142. nhits = nrects = 0;
  143. spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
  144. /* Find buffer swaps scheduled for this vertical blank */
  145. list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
  146. drm_i915_vbl_swap_t *vbl_swap =
  147. list_entry(list, drm_i915_vbl_swap_t, head);
  148. int pipe = i915_get_pipe(dev, vbl_swap->plane);
  149. if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
  150. continue;
  151. list_del(list);
  152. dev_priv->swaps_pending--;
  153. drm_vblank_put(dev, pipe);
  154. spin_unlock(&dev_priv->swaps_lock);
  155. spin_lock(&dev->drw_lock);
  156. drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
  157. if (!drw) {
  158. spin_unlock(&dev->drw_lock);
  159. drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
  160. spin_lock(&dev_priv->swaps_lock);
  161. continue;
  162. }
  163. list_for_each(hit, &hits) {
  164. drm_i915_vbl_swap_t *swap_cmp =
  165. list_entry(hit, drm_i915_vbl_swap_t, head);
  166. struct drm_drawable_info *drw_cmp =
  167. drm_get_drawable_info(dev, swap_cmp->drw_id);
  168. if (drw_cmp &&
  169. drw_cmp->rects[0].y1 > drw->rects[0].y1) {
  170. list_add_tail(list, hit);
  171. break;
  172. }
  173. }
  174. spin_unlock(&dev->drw_lock);
  175. /* List of hits was empty, or we reached the end of it */
  176. if (hit == &hits)
  177. list_add_tail(list, hits.prev);
  178. nhits++;
  179. spin_lock(&dev_priv->swaps_lock);
  180. }
  181. if (nhits == 0) {
  182. spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
  183. return;
  184. }
  185. spin_unlock(&dev_priv->swaps_lock);
  186. i915_kernel_lost_context(dev);
  187. if (IS_I965G(dev)) {
  188. BEGIN_LP_RING(4);
  189. OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
  190. OUT_RING(0);
  191. OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
  192. OUT_RING(0);
  193. ADVANCE_LP_RING();
  194. } else {
  195. BEGIN_LP_RING(6);
  196. OUT_RING(GFX_OP_DRAWRECT_INFO);
  197. OUT_RING(0);
  198. OUT_RING(0);
  199. OUT_RING(sarea_priv->width | sarea_priv->height << 16);
  200. OUT_RING(sarea_priv->width | sarea_priv->height << 16);
  201. OUT_RING(0);
  202. ADVANCE_LP_RING();
  203. }
  204. sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
  205. upper[0] = upper[1] = 0;
  206. slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
  207. slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
  208. lower[0] = sarea_priv->pipeA_y + slice[0];
  209. lower[1] = sarea_priv->pipeB_y + slice[0];
  210. spin_lock(&dev->drw_lock);
  211. /* Emit blits for buffer swaps, partitioning both outputs into as many
  212. * slices as there are buffer swaps scheduled in order to avoid tearing
  213. * (based on the assumption that a single buffer swap would always
  214. * complete before scanout starts).
  215. */
  216. for (i = 0; i++ < nhits;
  217. upper[0] = lower[0], lower[0] += slice[0],
  218. upper[1] = lower[1], lower[1] += slice[1]) {
  219. if (i == nhits)
  220. lower[0] = lower[1] = sarea_priv->height;
  221. list_for_each(hit, &hits) {
  222. drm_i915_vbl_swap_t *swap_hit =
  223. list_entry(hit, drm_i915_vbl_swap_t, head);
  224. struct drm_clip_rect *rect;
  225. int num_rects, plane;
  226. unsigned short top, bottom;
  227. drw = drm_get_drawable_info(dev, swap_hit->drw_id);
  228. if (!drw)
  229. continue;
  230. rect = drw->rects;
  231. plane = swap_hit->plane;
  232. top = upper[plane];
  233. bottom = lower[plane];
  234. for (num_rects = drw->num_rects; num_rects--; rect++) {
  235. int y1 = max(rect->y1, top);
  236. int y2 = min(rect->y2, bottom);
  237. if (y1 >= y2)
  238. continue;
  239. BEGIN_LP_RING(8);
  240. OUT_RING(cmd);
  241. OUT_RING(ropcpp | dst_pitch);
  242. OUT_RING((y1 << 16) | rect->x1);
  243. OUT_RING((y2 << 16) | rect->x2);
  244. OUT_RING(sarea_priv->front_offset);
  245. OUT_RING((y1 << 16) | rect->x1);
  246. OUT_RING(src_pitch);
  247. OUT_RING(sarea_priv->back_offset);
  248. ADVANCE_LP_RING();
  249. }
  250. }
  251. }
  252. spin_unlock_irqrestore(&dev->drw_lock, irqflags);
  253. list_for_each_safe(hit, tmp, &hits) {
  254. drm_i915_vbl_swap_t *swap_hit =
  255. list_entry(hit, drm_i915_vbl_swap_t, head);
  256. list_del(hit);
  257. drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
  258. }
  259. }
/**
 * i915_get_vblank_counter - fetch the hardware frame counter for a plane
 * @dev: DRM device
 * @plane: plane whose pipe's frame counter to read
 *
 * Returns the 24-bit frame count assembled from the high/low register
 * halves, or 0 if the pipe is disabled (reading it then is unsafe, see
 * i915_pipe_enabled()).
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low, count;
        int pipe;

        pipe = i915_get_pipe(dev, plane);
        high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
        low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
                return 0;
        }

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
                         PIPE_FRAME_HIGH_SHIFT);
                low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
                       PIPE_FRAME_LOW_SHIFT);
                high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
                         PIPE_FRAME_HIGH_SHIFT);
        } while (high1 != high2);

        /* Combine: the low field contributes the bottom 8 bits. */
        count = (high1 << 8) | low;
        return count;
}
  290. void
  291. i915_gem_vblank_work_handler(struct work_struct *work)
  292. {
  293. drm_i915_private_t *dev_priv;
  294. struct drm_device *dev;
  295. dev_priv = container_of(work, drm_i915_private_t,
  296. mm.vblank_work);
  297. dev = dev_priv->dev;
  298. mutex_lock(&dev->struct_mutex);
  299. i915_vblank_tasklet(dev);
  300. mutex_unlock(&dev->struct_mutex);
  301. }
/**
 * i915_driver_irq_handler - main interrupt service routine
 *
 * Acks pipe A/B display events, the user interrupt and ASLE events, wakes
 * breadcrumb waiters and kicks the buffer-swap tasklet/work item.
 */
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir;
        u32 pipea_stats, pipeb_stats;
        int vblank = 0;

        /* With MSI, mask all sources while the interrupt is processed. */
        if (dev->pdev->msi_enabled)
                I915_WRITE(IMR, ~0);
        iir = I915_READ(IIR);

        if (iir == 0) {
                /* Not our interrupt: restore the mask and decline it. */
                if (dev->pdev->msi_enabled) {
                        I915_WRITE(IMR, dev_priv->irq_mask_reg);
                        (void) I915_READ(IMR);
                }
                return IRQ_NONE;
        }

        /*
         * Clear the PIPE(A|B)STAT regs before the IIR otherwise
         * we may get extra interrupts.
         */
        if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
                pipea_stats = I915_READ(PIPEASTAT);

                /* Drop the vblank-enable bits if pipe A vblank reporting
                 * wasn't requested; otherwise deliver the vblank event. */
                if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
                        pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
                                         PIPE_VBLANK_INTERRUPT_ENABLE);
                else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
                                        PIPE_VBLANK_INTERRUPT_STATUS)) {
                        vblank++;
                        drm_handle_vblank(dev, i915_get_plane(dev, 0));
                }

                /* Writing the latched status bits back acks them. */
                I915_WRITE(PIPEASTAT, pipea_stats);
        }
        if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
                pipeb_stats = I915_READ(PIPEBSTAT);
                /* Ack the event */
                I915_WRITE(PIPEBSTAT, pipeb_stats);

                /* The vblank interrupt gets enabled even if we didn't ask for
                   it, so make sure it's shut down again */
                if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
                        pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
                                         PIPE_VBLANK_INTERRUPT_ENABLE);
                else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
                                        PIPE_VBLANK_INTERRUPT_STATUS)) {
                        vblank++;
                        drm_handle_vblank(dev, i915_get_plane(dev, 1));
                }

                if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
                        opregion_asle_intr(dev);

                /* Second write pushes the possibly-cleared enable bits. */
                I915_WRITE(PIPEBSTAT, pipeb_stats);
        }

        I915_WRITE(IIR, iir);
        if (dev->pdev->msi_enabled)
                I915_WRITE(IMR, dev_priv->irq_mask_reg);
        (void) I915_READ(IIR); /* Flush posted writes */

        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->last_dispatch =
                        READ_BREADCRUMB(dev_priv);

        /* Breadcrumb advanced: record the GEM seqno and wake waiters. */
        if (iir & I915_USER_INTERRUPT) {
                dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
                DRM_WAKEUP(&dev_priv->irq_queue);
        }

        if (iir & I915_ASLE_INTERRUPT)
                opregion_asle_intr(dev);

        /* Dispatch pending swaps: via the locked tasklet, or via the
         * workqueue when the ring is a GEM object. */
        if (vblank && dev_priv->swaps_pending > 0) {
                if (dev_priv->ring.ring_obj == NULL)
                        drm_locked_tasklet(dev, i915_vblank_tasklet);
                else
                        schedule_work(&dev_priv->mm.vblank_work);
        }

        return IRQ_HANDLED;
}
/* Emit a breadcrumb store followed by a user interrupt into the ring.
 * Returns the new breadcrumb counter value.  Caller must hold the lock
 * protecting the ring (see i915_irq_emit()).
 */
static int i915_emit_irq(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        i915_kernel_lost_context(dev);

        DRM_DEBUG("\n");

        /* Monotonic counter, wrapped back to 1 so it always fits in a
         * positive int (the ioctl returns it as int). */
        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 1;

        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

        BEGIN_LP_RING(6);
        /* Store the counter at status-page index 5 — presumably the slot
         * READ_BREADCRUMB() reads; confirm against i915_drv.h. */
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        OUT_RING(0);
        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();

        return dev_priv->counter;
}
  395. void i915_user_irq_get(struct drm_device *dev)
  396. {
  397. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  398. spin_lock(&dev_priv->user_irq_lock);
  399. if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
  400. i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
  401. spin_unlock(&dev_priv->user_irq_lock);
  402. }
  403. void i915_user_irq_put(struct drm_device *dev)
  404. {
  405. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  406. spin_lock(&dev_priv->user_irq_lock);
  407. BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
  408. if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
  409. i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
  410. spin_unlock(&dev_priv->user_irq_lock);
  411. }
/* Wait (up to 3 seconds) until the ring breadcrumb reaches @irq_nr.
 * Returns 0 on success or the DRM_WAIT_ON error; -EBUSY is treated as
 * a timeout and logged below.
 */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = 0;

        DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));

        /* Already satisfied?  Update the sarea and return right away. */
        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                if (dev_priv->sarea_priv) {
                        dev_priv->sarea_priv->last_dispatch =
                                READ_BREADCRUMB(dev_priv);
                }
                return 0;
        }

        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        /* Hold a user-interrupt reference across the wait so the
         * breadcrumb interrupt can wake us. */
        i915_user_irq_get(dev);
        DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
                    READ_BREADCRUMB(dev_priv) >= irq_nr);
        i915_user_irq_put(dev);

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }

        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->last_dispatch =
                        READ_BREADCRUMB(dev_priv);

        return ret;
}
/* Needs the lock as it touches the ring.
 */
/* IOCTL: emit an IRQ breadcrumb and copy its sequence number back to
 * userspace via emit->irq_seq.  Returns 0, -EINVAL or -EFAULT. */
int i915_irq_emit(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        /* Serialize ring emission with struct_mutex. */
        mutex_lock(&dev->struct_mutex);
        result = i915_emit_irq(dev);
        mutex_unlock(&dev->struct_mutex);

        if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}
  462. /* Doesn't need the hardware lock.
  463. */
  464. int i915_irq_wait(struct drm_device *dev, void *data,
  465. struct drm_file *file_priv)
  466. {
  467. drm_i915_private_t *dev_priv = dev->dev_private;
  468. drm_i915_irq_wait_t *irqwait = data;
  469. if (!dev_priv) {
  470. DRM_ERROR("called with no initialization\n");
  471. return -EINVAL;
  472. }
  473. return i915_wait_irq(dev, irqwait->irq_seq);
  474. }
  475. int i915_enable_vblank(struct drm_device *dev, int plane)
  476. {
  477. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  478. int pipe = i915_get_pipe(dev, plane);
  479. u32 pipestat_reg = 0;
  480. u32 pipestat;
  481. switch (pipe) {
  482. case 0:
  483. pipestat_reg = PIPEASTAT;
  484. i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
  485. break;
  486. case 1:
  487. pipestat_reg = PIPEBSTAT;
  488. i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
  489. break;
  490. default:
  491. DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
  492. pipe);
  493. break;
  494. }
  495. if (pipestat_reg) {
  496. pipestat = I915_READ(pipestat_reg);
  497. if (IS_I965G(dev))
  498. pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
  499. else
  500. pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
  501. /* Clear any stale interrupt status */
  502. pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
  503. PIPE_VBLANK_INTERRUPT_STATUS);
  504. I915_WRITE(pipestat_reg, pipestat);
  505. }
  506. return 0;
  507. }
  508. void i915_disable_vblank(struct drm_device *dev, int plane)
  509. {
  510. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  511. int pipe = i915_get_pipe(dev, plane);
  512. u32 pipestat_reg = 0;
  513. u32 pipestat;
  514. switch (pipe) {
  515. case 0:
  516. pipestat_reg = PIPEASTAT;
  517. i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
  518. break;
  519. case 1:
  520. pipestat_reg = PIPEBSTAT;
  521. i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
  522. break;
  523. default:
  524. DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
  525. pipe);
  526. break;
  527. }
  528. if (pipestat_reg) {
  529. pipestat = I915_READ(pipestat_reg);
  530. pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
  531. PIPE_VBLANK_INTERRUPT_ENABLE);
  532. /* Clear any stale interrupt status */
  533. pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
  534. PIPE_VBLANK_INTERRUPT_STATUS);
  535. I915_WRITE(pipestat_reg, pipestat);
  536. }
  537. }
  538. /* Set the vblank monitor pipe
  539. */
  540. int i915_vblank_pipe_set(struct drm_device *dev, void *data,
  541. struct drm_file *file_priv)
  542. {
  543. drm_i915_private_t *dev_priv = dev->dev_private;
  544. if (!dev_priv) {
  545. DRM_ERROR("called with no initialization\n");
  546. return -EINVAL;
  547. }
  548. return 0;
  549. }
  550. int i915_vblank_pipe_get(struct drm_device *dev, void *data,
  551. struct drm_file *file_priv)
  552. {
  553. drm_i915_private_t *dev_priv = dev->dev_private;
  554. drm_i915_vblank_pipe_t *pipe = data;
  555. if (!dev_priv) {
  556. DRM_ERROR("called with no initialization\n");
  557. return -EINVAL;
  558. }
  559. pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
  560. return 0;
  561. }
/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_swap_t *swap = data;
        drm_i915_vbl_swap_t *vbl_swap;
        unsigned int pipe, seqtype, curseq, plane;
        unsigned long irqflags;
        struct list_head *list;
        int ret;

        if (!dev_priv || !dev_priv->sarea_priv) {
                DRM_ERROR("%s called with no initialization\n", __func__);
                return -EINVAL;
        }

        if (dev_priv->sarea_priv->rotation) {
                DRM_DEBUG("Rotation not supported\n");
                return -EINVAL;
        }

        /* Reject any sequence-type flags we don't understand. */
        if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
                              _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
                DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
                return -EINVAL;
        }

        plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
        pipe = i915_get_pipe(dev, plane);

        seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

        if (!(dev_priv->vblank_pipe & (1 << pipe))) {
                DRM_ERROR("Invalid pipe %d\n", pipe);
                return -EINVAL;
        }

        /* The drawable must exist; only the lookup needs drw_lock. */
        spin_lock_irqsave(&dev->drw_lock, irqflags);

        if (!drm_get_drawable_info(dev, swap->drawable)) {
                spin_unlock_irqrestore(&dev->drw_lock, irqflags);
                DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
                return -EINVAL;
        }

        spin_unlock_irqrestore(&dev->drw_lock, irqflags);

        /*
         * We take the ref here and put it when the swap actually completes
         * in the tasklet.
         */
        ret = drm_vblank_get(dev, pipe);
        if (ret)
                return ret;
        curseq = drm_vblank_count(dev, pipe);

        if (seqtype == _DRM_VBLANK_RELATIVE)
                swap->sequence += curseq;

        /* Target already missed (unsigned wrap-safe comparison)? */
        if ((curseq - swap->sequence) <= (1<<23)) {
                if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
                        swap->sequence = curseq + 1;
                } else {
                        DRM_DEBUG("Missed target sequence\n");
                        drm_vblank_put(dev, pipe);
                        return -EINVAL;
                }
        }

        /* Coalesce with an identical already-scheduled swap, if any. */
        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        list_for_each(list, &dev_priv->vbl_swaps.head) {
                vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);

                if (vbl_swap->drw_id == swap->drawable &&
                    vbl_swap->plane == plane &&
                    vbl_swap->sequence == swap->sequence) {
                        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
                        /* NOTE(review): this path appears to leak the
                         * drm_vblank_get() reference taken above — confirm. */
                        DRM_DEBUG("Already scheduled\n");
                        return 0;
                }
        }

        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

        /* NOTE(review): swaps_lock is dropped between the duplicate check
         * above and the list_add_tail() below, so a concurrent caller could
         * queue the same swap twice — confirm callers are serialized. */
        if (dev_priv->swaps_pending >= 100) {
                DRM_DEBUG("Too many swaps queued\n");
                drm_vblank_put(dev, pipe);
                return -EBUSY;
        }

        vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

        if (!vbl_swap) {
                DRM_ERROR("Failed to allocate memory to queue swap\n");
                drm_vblank_put(dev, pipe);
                return -ENOMEM;
        }

        DRM_DEBUG("\n");

        vbl_swap->drw_id = swap->drawable;
        vbl_swap->plane = plane;
        vbl_swap->sequence = swap->sequence;

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);

        dev_priv->swaps_pending++;

        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

        return 0;
}
/* drm_dma.h hooks
*/
/* Quiesce interrupt delivery before the IRQ handler is installed:
 * mask all sources in IMR and disable generation via IER.
 * (0xeffe in HWSTAM is an inherited magic value — leaves two status
 * events unmasked; confirm against the PRM if changing.) */
void i915_driver_irq_preinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        I915_WRITE(HWSTAM, 0xeffe);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);
}
/* Finish IRQ setup after the handler is installed: init swap bookkeeping,
 * register both pipes with the DRM vblank core and program the initial
 * interrupt mask/enable registers.  Returns 0 or the drm_vblank_init()
 * error. */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret, num_pipes = 2;

        spin_lock_init(&dev_priv->swaps_lock);
        INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
        dev_priv->swaps_pending = 0;

        /* Set initial unmasked IRQs to just the selected vblank pipes. */
        dev_priv->irq_mask_reg = ~0;

        ret = drm_vblank_init(dev, num_pipes);
        if (ret)
                return ret;

        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
        dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */

        /* Never unmask anything outside the driver's enable mask. */
        dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;

        I915_WRITE(IMR, dev_priv->irq_mask_reg);
        I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
        (void) I915_READ(IER); /* flush the posted writes */

        opregion_enable_asle(dev);
        DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

        return 0;
}
/* Tear down interrupt state: mask and disable everything, then ack any
 * pending pipe and IIR events by writing the latched status back. */
void i915_driver_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 temp;

        if (!dev_priv)
                return;

        dev_priv->vblank_pipe = 0;

        I915_WRITE(HWSTAM, 0xffffffff);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);

        /* Writing a status register's contents back clears those events. */
        temp = I915_READ(PIPEASTAT);
        I915_WRITE(PIPEASTAT, temp);
        temp = I915_READ(PIPEBSTAT);
        I915_WRITE(PIPEBSTAT, temp);
        temp = I915_READ(IIR);
        I915_WRITE(IIR, temp);
}