intel_overlay.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572
  1. /*
  2. * Copyright © 2009
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  20. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  21. * SOFTWARE.
  22. *
  23. * Authors:
  24. * Daniel Vetter <daniel@ffwll.ch>
  25. *
  26. * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
  27. */
  28. #include "drmP.h"
  29. #include "drm.h"
  30. #include "i915_drm.h"
  31. #include "i915_drv.h"
  32. #include "i915_reg.h"
  33. #include "intel_drv.h"
/* Limits for overlay size. According to intel doc, the real limits are:
 * Y width: 4095, UV width (planar): 2047, Y height: 2047,
 * UV height (planar): 1023. But the xorg driver assumes 2048 for both width
 * and height. Use the minimum of both. */
#define IMAGE_MAX_WIDTH 2048
#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
/* on 830 and 845 these large limits result in the card hanging */
#define IMAGE_MAX_WIDTH_LEGACY 1024
#define IMAGE_MAX_HEIGHT_LEGACY 1088

/* overlay register definitions */
/* OCMD register */
#define OCMD_TILED_SURFACE (0x1<<19)
#define OCMD_MIRROR_MASK (0x3<<17)
#define OCMD_MIRROR_MODE (0x3<<17)
#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
#define OCMD_MIRROR_VERTICAL (0x2<<17)
#define OCMD_MIRROR_BOTH (0x3<<17)
#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
#define OCMD_YUV_422_PACKED (0x8<<10)
#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
#define OCMD_YUV_420_PLANAR (0xc<<10)
#define OCMD_YUV_422_PLANAR (0xd<<10)
#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
#define OCMD_BUF_TYPE_MASK (0x1<<5)
#define OCMD_BUF_TYPE_FRAME (0x0<<5)
#define OCMD_BUF_TYPE_FIELD (0x1<<5)
#define OCMD_TEST_MODE (0x1<<4)
#define OCMD_BUFFER_SELECT (0x3<<2)
#define OCMD_BUFFER0 (0x0<<2)
#define OCMD_BUFFER1 (0x1<<2)
#define OCMD_FIELD_SELECT (0x1<<2)
#define OCMD_FIELD0 (0x0<<1)
#define OCMD_FIELD1 (0x1<<1)
#define OCMD_ENABLE (0x1<<0)

/* OCONFIG register */
#define OCONF_PIPE_MASK (0x1<<18)
#define OCONF_PIPE_A (0x0<<18)
#define OCONF_PIPE_B (0x1<<18)
#define OCONF_GAMMA2_ENABLE (0x1<<16)
#define OCONF_CSC_MODE_BT601 (0x0<<5)
#define OCONF_CSC_MODE_BT709 (0x1<<5)
#define OCONF_CSC_BYPASS (0x1<<4)
#define OCONF_CC_OUT_8BIT (0x1<<3)
#define OCONF_TEST_MODE (0x1<<2)
#define OCONF_THREE_LINE_BUFFER (0x1<<0)
#define OCONF_TWO_LINE_BUFFER (0x0<<0)

/* DCLRKM (dst-key) register */
#define DST_KEY_ENABLE (0x1<<31)
#define CLK_RGB24_MASK 0x0
#define CLK_RGB16_MASK 0x070307
#define CLK_RGB15_MASK 0x070707
#define CLK_RGB8I_MASK 0xffffff

/* Expand 16/15bpp framebuffer color keys to the 8:8:8 key register layout. */
#define RGB16_TO_COLORKEY(c) \
	(((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
#define RGB15_TO_COLORKEY(c) \
	(((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))

/* overlay flip addr flag */
#define OFC_UPDATE 0x1

/* polyphase filter coefficients */
#define N_HORIZ_Y_TAPS 5
#define N_VERT_Y_TAPS 3
#define N_HORIZ_UV_TAPS 3
#define N_VERT_UV_TAPS 3
#define N_PHASES 17
#define MAX_TAPS 5
/* memory buffered overlay registers
 *
 * The overlay hardware reads its register state from this in-memory block
 * (mapped via intel_overlay_map_regs()); the field order mirrors the
 * hardware layout, so it must not be changed.  Offsets of selected fields
 * are noted in the trailing comments. */
struct overlay_registers {
	u32 OBUF_0Y;
	u32 OBUF_1Y;
	u32 OBUF_0U;
	u32 OBUF_0V;
	u32 OBUF_1U;
	u32 OBUF_1V;
	u32 OSTRIDE;
	u32 YRGB_VPH;
	u32 UV_VPH;
	u32 HORZ_PH;
	u32 INIT_PHS;
	u32 DWINPOS;
	u32 DWINSZ;
	u32 SWIDTH;
	u32 SWIDTHSW;
	u32 SHEIGHT;
	u32 YRGBSCALE;
	u32 UVSCALE;
	u32 OCLRC0;
	u32 OCLRC1;
	u32 DCLRKV;
	u32 DCLRKM;
	u32 SCLRKVH;
	u32 SCLRKVL;
	u32 SCLRKEN;
	u32 OCONFIG;
	u32 OCMD;
	u32 RESERVED1; /* 0x6C */
	u32 OSTART_0Y;
	u32 OSTART_1Y;
	u32 OSTART_0U;
	u32 OSTART_0V;
	u32 OSTART_1U;
	u32 OSTART_1V;
	u32 OTILEOFF_0Y;
	u32 OTILEOFF_1Y;
	u32 OTILEOFF_0U;
	u32 OTILEOFF_0V;
	u32 OTILEOFF_1U;
	u32 OTILEOFF_1V;
	u32 FASTHSCALE; /* 0xA0 */
	u32 UVSCALEV; /* 0xA4 */
	u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
	u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
	u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
	u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
	u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
	u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
	u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
	u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
	u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
};
/* Driver-side state for the single overlay engine. */
struct intel_overlay {
	struct drm_device *dev;
	struct intel_crtc *crtc;		/* crtc currently showing the overlay, NULL when off */
	struct drm_i915_gem_object *vid_bo;	/* currently displayed frame */
	struct drm_i915_gem_object *old_vid_bo;	/* previous frame, released after the flip completes */
	int active;				/* overlay hw switched on? */
	int pfit_active;
	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
	u32 color_key;
	u32 brightness, contrast, saturation;
	u32 old_xscale, old_yscale;		/* last programmed scales, to detect changes */
	/* register access */
	u32 flip_addr;
	struct drm_i915_gem_object *reg_bo;	/* backing storage for struct overlay_registers */
	/* flip handling */
	uint32_t last_flip_req;			/* seqno of outstanding flip, 0 if none */
	void (*flip_tail)(struct intel_overlay *); /* completion work, run when the flip retires */
};
  180. static struct overlay_registers __iomem *
  181. intel_overlay_map_regs(struct intel_overlay *overlay)
  182. {
  183. drm_i915_private_t *dev_priv = overlay->dev->dev_private;
  184. struct overlay_registers __iomem *regs;
  185. if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
  186. regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
  187. else
  188. regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
  189. overlay->reg_bo->gtt_offset);
  190. return regs;
  191. }
  192. static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
  193. struct overlay_registers __iomem *regs)
  194. {
  195. if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
  196. io_mapping_unmap(regs);
  197. }
/*
 * Emit the given request and synchronously wait for it to complete, then run
 * request retirement.  'tail' is recorded before waiting so that
 * intel_overlay_recover_from_interrupt() can finish the operation if the wait
 * returns early.  Takes ownership of 'request' (freed if it cannot be added).
 * Returns 0 on success or a negative error code.
 */
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
					 struct drm_i915_gem_request *request,
					 void (*tail)(struct intel_overlay *))
{
	struct drm_device *dev = overlay->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	/* only one overlay flip may be outstanding at a time */
	BUG_ON(overlay->last_flip_req);
	ret = i915_add_request(ring, NULL, request);
	if (ret) {
		kfree(request);
		return ret;
	}
	/* record the flip before waiting: on an interrupted wait the recovery
	 * path relies on last_flip_req/flip_tail to make forward progress */
	overlay->last_flip_req = request->seqno;
	overlay->flip_tail = tail;
	ret = i915_wait_seqno(ring, overlay->last_flip_req);
	if (ret)
		return ret;
	i915_gem_retire_requests(dev);

	overlay->last_flip_req = 0;
	return 0;
}
  221. /* overlay needs to be disable in OCMD reg */
  222. static int intel_overlay_on(struct intel_overlay *overlay)
  223. {
  224. struct drm_device *dev = overlay->dev;
  225. struct drm_i915_private *dev_priv = dev->dev_private;
  226. struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
  227. struct drm_i915_gem_request *request;
  228. int ret;
  229. BUG_ON(overlay->active);
  230. overlay->active = 1;
  231. WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
  232. request = kzalloc(sizeof(*request), GFP_KERNEL);
  233. if (request == NULL) {
  234. ret = -ENOMEM;
  235. goto out;
  236. }
  237. ret = intel_ring_begin(ring, 4);
  238. if (ret) {
  239. kfree(request);
  240. goto out;
  241. }
  242. intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
  243. intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
  244. intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
  245. intel_ring_emit(ring, MI_NOOP);
  246. intel_ring_advance(ring);
  247. ret = intel_overlay_do_wait_request(overlay, request, NULL);
  248. out:
  249. return ret;
  250. }
  251. /* overlay needs to be enabled in OCMD reg */
  252. static int intel_overlay_continue(struct intel_overlay *overlay,
  253. bool load_polyphase_filter)
  254. {
  255. struct drm_device *dev = overlay->dev;
  256. drm_i915_private_t *dev_priv = dev->dev_private;
  257. struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
  258. struct drm_i915_gem_request *request;
  259. u32 flip_addr = overlay->flip_addr;
  260. u32 tmp;
  261. int ret;
  262. BUG_ON(!overlay->active);
  263. request = kzalloc(sizeof(*request), GFP_KERNEL);
  264. if (request == NULL)
  265. return -ENOMEM;
  266. if (load_polyphase_filter)
  267. flip_addr |= OFC_UPDATE;
  268. /* check for underruns */
  269. tmp = I915_READ(DOVSTA);
  270. if (tmp & (1 << 17))
  271. DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
  272. ret = intel_ring_begin(ring, 2);
  273. if (ret) {
  274. kfree(request);
  275. return ret;
  276. }
  277. intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
  278. intel_ring_emit(ring, flip_addr);
  279. intel_ring_advance(ring);
  280. ret = i915_add_request(ring, NULL, request);
  281. if (ret) {
  282. kfree(request);
  283. return ret;
  284. }
  285. overlay->last_flip_req = request->seqno;
  286. return 0;
  287. }
  288. static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
  289. {
  290. struct drm_i915_gem_object *obj = overlay->old_vid_bo;
  291. i915_gem_object_unpin(obj);
  292. drm_gem_object_unreference(&obj->base);
  293. overlay->old_vid_bo = NULL;
  294. }
  295. static void intel_overlay_off_tail(struct intel_overlay *overlay)
  296. {
  297. struct drm_i915_gem_object *obj = overlay->vid_bo;
  298. /* never have the overlay hw on without showing a frame */
  299. BUG_ON(!overlay->vid_bo);
  300. i915_gem_object_unpin(obj);
  301. drm_gem_object_unreference(&obj->base);
  302. overlay->vid_bo = NULL;
  303. overlay->crtc->overlay = NULL;
  304. overlay->crtc = NULL;
  305. overlay->active = 0;
  306. }
/* overlay needs to be disabled in OCMD reg
 *
 * Emits the flip + off sequence and synchronously waits for it;
 * intel_overlay_off_tail releases the last frame once the wait completes. */
static int intel_overlay_off(struct intel_overlay *overlay)
{
	struct drm_device *dev = overlay->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	u32 flip_addr = overlay->flip_addr;
	struct drm_i915_gem_request *request;
	int ret;

	BUG_ON(!overlay->active);

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return -ENOMEM;

	/* According to intel docs the overlay hw may hang (when switching
	 * off) without loading the filter coeffs. It is however unclear whether
	 * this applies to the disabling of the overlay or to the switching off
	 * of the hw. Do it in both cases */
	flip_addr |= OFC_UPDATE;

	ret = intel_ring_begin(ring, 6);
	if (ret) {
		kfree(request);
		return ret;
	}
	/* wait for overlay to go idle */
	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
	intel_ring_emit(ring, flip_addr);
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
	/* turn overlay off */
	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
	intel_ring_emit(ring, flip_addr);
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
	intel_ring_advance(ring);

	return intel_overlay_do_wait_request(overlay, request,
					     intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
 * We have to be careful not to repeat work forever and make forward progress:
 * finish the outstanding flip (wait, retire, run the recorded tail) and clear
 * last_flip_req so the operation is not re-done. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
	struct drm_device *dev = overlay->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	/* nothing outstanding, nothing to recover */
	if (overlay->last_flip_req == 0)
		return 0;

	ret = i915_wait_seqno(ring, overlay->last_flip_req);
	if (ret)
		return ret;
	i915_gem_retire_requests(dev);

	if (overlay->flip_tail)
		overlay->flip_tail(overlay);

	overlay->last_flip_req = 0;
	return 0;
}
/* Wait for pending overlay flip and release old frame.
 * Needs to be called before the overlay register are changed
 * via intel_overlay_(un)map_regs
 */
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
	struct drm_device *dev = overlay->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	/* Only wait if there is actually an old frame to release to
	 * guarantee forward progress.
	 */
	if (!overlay->old_vid_bo)
		return 0;

	/* flip still pending in hardware? then take the synchronous slowpath:
	 * stall on the flip event before releasing the old frame */
	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
		struct drm_i915_gem_request *request;

		/* synchronous slowpath */
		request = kzalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ret = intel_ring_begin(ring, 2);
		if (ret) {
			kfree(request);
			return ret;
		}

		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		/* note: the wait-request path runs the tail itself on the
		 * interrupted-recovery path; on success we fall through and
		 * release below */
		ret = intel_overlay_do_wait_request(overlay, request,
						    intel_overlay_release_old_vid_tail);
		if (ret)
			return ret;
	}

	intel_overlay_release_old_vid_tail(overlay);
	return 0;
}
/* Validated parameters for one put_image request, derived from the
 * userspace drm_intel_overlay_put_image record. */
struct put_image_params {
	int format;		/* I915_OVERLAY_* format/depth/swap bits */
	short dst_x;		/* destination window on the crtc */
	short dst_y;
	short dst_w;
	short dst_h;
	short src_w;		/* source buffer width */
	short src_scan_h;	/* portion of the source actually scanned out */
	short src_scan_w;
	short src_h;		/* source buffer height */
	short stride_Y;		/* byte strides of the Y and UV planes */
	short stride_UV;
	int offset_Y;		/* plane offsets into the buffer object */
	int offset_U;
	int offset_V;
};
  414. static int packed_depth_bytes(u32 format)
  415. {
  416. switch (format & I915_OVERLAY_DEPTH_MASK) {
  417. case I915_OVERLAY_YUV422:
  418. return 4;
  419. case I915_OVERLAY_YUV411:
  420. /* return 6; not implemented */
  421. default:
  422. return -EINVAL;
  423. }
  424. }
  425. static int packed_width_bytes(u32 format, short width)
  426. {
  427. switch (format & I915_OVERLAY_DEPTH_MASK) {
  428. case I915_OVERLAY_YUV422:
  429. return width << 1;
  430. default:
  431. return -EINVAL;
  432. }
  433. }
  434. static int uv_hsubsampling(u32 format)
  435. {
  436. switch (format & I915_OVERLAY_DEPTH_MASK) {
  437. case I915_OVERLAY_YUV422:
  438. case I915_OVERLAY_YUV420:
  439. return 2;
  440. case I915_OVERLAY_YUV411:
  441. case I915_OVERLAY_YUV410:
  442. return 4;
  443. default:
  444. return -EINVAL;
  445. }
  446. }
  447. static int uv_vsubsampling(u32 format)
  448. {
  449. switch (format & I915_OVERLAY_DEPTH_MASK) {
  450. case I915_OVERLAY_YUV420:
  451. case I915_OVERLAY_YUV410:
  452. return 2;
  453. case I915_OVERLAY_YUV422:
  454. case I915_OVERLAY_YUV411:
  455. return 1;
  456. default:
  457. return -EINVAL;
  458. }
  459. }
  460. static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
  461. {
  462. u32 mask, shift, ret;
  463. if (IS_GEN2(dev)) {
  464. mask = 0x1f;
  465. shift = 5;
  466. } else {
  467. mask = 0x3f;
  468. shift = 6;
  469. }
  470. ret = ((offset + width + mask) >> shift) - (offset >> shift);
  471. if (!IS_GEN2(dev))
  472. ret <<= 1;
  473. ret -= 1;
  474. return ret << 2;
  475. }
/* Static horizontal polyphase filter coefficients, N_PHASES phases of
 * N_HORIZ_Y_TAPS (Y) / N_HORIZ_UV_TAPS (UV) taps each, uploaded by
 * update_polyphase_filter().  Values are hardware fixed-point encodings;
 * do not edit by hand. */
static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
	0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
	0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
	0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
	0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
	0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
	0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
	0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
	0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
	0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
	0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
	0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
	0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
	0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
	0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
	0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
	0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
	0xb000, 0x3000, 0x0800, 0x3000, 0xb000
};

static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
	0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
	0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
	0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
	0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
	0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
	0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
	0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
	0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
	0x3000, 0x0800, 0x3000
};
/* Upload the static horizontal filter coefficient tables into the
 * memory-mapped overlay register file. */
static void update_polyphase_filter(struct overlay_registers __iomem *regs)
{
	memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
	memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
		    sizeof(uv_static_hcoeffs));
}
/*
 * Program YRGBSCALE/UVSCALE/UVSCALEV from the requested source and
 * destination sizes.  Scales are fixed point with a 12-bit fractional part
 * (FP_SHIFT); the Y scale is snapped to an exact multiple of the UV scale.
 * Returns true when the factors changed since the last call, in which case
 * the polyphase filter coefficients are re-uploaded too.
 */
static bool update_scaling_factors(struct intel_overlay *overlay,
				   struct overlay_registers __iomem *regs,
				   struct put_image_params *params)
{
	/* fixed point with a 12 bit shift */
	u32 xscale, yscale, xscale_UV, yscale_UV;
#define FP_SHIFT 12
#define FRACT_MASK 0xfff
	bool scale_changed = false;
	int uv_hscale = uv_hsubsampling(params->format);
	int uv_vscale = uv_vsubsampling(params->format);

	/* guard against division by zero for 0/1-pixel destinations */
	if (params->dst_w > 1)
		xscale = ((params->src_scan_w - 1) << FP_SHIFT)
			/(params->dst_w);
	else
		xscale = 1 << FP_SHIFT;

	if (params->dst_h > 1)
		yscale = ((params->src_scan_h - 1) << FP_SHIFT)
			/(params->dst_h);
	else
		yscale = 1 << FP_SHIFT;

	/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
	xscale_UV = xscale/uv_hscale;
	yscale_UV = yscale/uv_vscale;
	/* make the Y scale to UV scale ratio an exact multiply */
	xscale = xscale_UV * uv_hscale;
	yscale = yscale_UV * uv_vscale;
	/*} else {
	  xscale_UV = 0;
	  yscale_UV = 0;
	  }*/

	if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
		scale_changed = true;
	overlay->old_xscale = xscale;
	overlay->old_yscale = yscale;

	/* register layout: integer part in bits 16+, fraction in low bits */
	iowrite32(((yscale & FRACT_MASK) << 20) |
		  ((xscale >> FP_SHIFT) << 16) |
		  ((xscale & FRACT_MASK) << 3),
		  &regs->YRGBSCALE);

	iowrite32(((yscale_UV & FRACT_MASK) << 20) |
		  ((xscale_UV >> FP_SHIFT) << 16) |
		  ((xscale_UV & FRACT_MASK) << 3),
		  &regs->UVSCALE);

	iowrite32((((yscale >> FP_SHIFT) << 16) |
		   ((yscale_UV >> FP_SHIFT) << 0)),
		  &regs->UVSCALEV);

	/* new scale factors need fresh filter coefficients */
	if (scale_changed)
		update_polyphase_filter(regs);

	return scale_changed;
}
  562. static void update_colorkey(struct intel_overlay *overlay,
  563. struct overlay_registers __iomem *regs)
  564. {
  565. u32 key = overlay->color_key;
  566. switch (overlay->crtc->base.fb->bits_per_pixel) {
  567. case 8:
  568. iowrite32(0, &regs->DCLRKV);
  569. iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
  570. break;
  571. case 16:
  572. if (overlay->crtc->base.fb->depth == 15) {
  573. iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
  574. iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
  575. &regs->DCLRKM);
  576. } else {
  577. iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
  578. iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
  579. &regs->DCLRKM);
  580. }
  581. break;
  582. case 24:
  583. case 32:
  584. iowrite32(key, &regs->DCLRKV);
  585. iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
  586. break;
  587. }
  588. }
  589. static u32 overlay_cmd_reg(struct put_image_params *params)
  590. {
  591. u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
  592. if (params->format & I915_OVERLAY_YUV_PLANAR) {
  593. switch (params->format & I915_OVERLAY_DEPTH_MASK) {
  594. case I915_OVERLAY_YUV422:
  595. cmd |= OCMD_YUV_422_PLANAR;
  596. break;
  597. case I915_OVERLAY_YUV420:
  598. cmd |= OCMD_YUV_420_PLANAR;
  599. break;
  600. case I915_OVERLAY_YUV411:
  601. case I915_OVERLAY_YUV410:
  602. cmd |= OCMD_YUV_410_PLANAR;
  603. break;
  604. }
  605. } else { /* YUV packed */
  606. switch (params->format & I915_OVERLAY_DEPTH_MASK) {
  607. case I915_OVERLAY_YUV422:
  608. cmd |= OCMD_YUV_422_PACKED;
  609. break;
  610. case I915_OVERLAY_YUV411:
  611. cmd |= OCMD_YUV_411_PACKED;
  612. break;
  613. }
  614. switch (params->format & I915_OVERLAY_SWAP_MASK) {
  615. case I915_OVERLAY_NO_SWAP:
  616. break;
  617. case I915_OVERLAY_UV_SWAP:
  618. cmd |= OCMD_UV_SWAP;
  619. break;
  620. case I915_OVERLAY_Y_SWAP:
  621. cmd |= OCMD_Y_SWAP;
  622. break;
  623. case I915_OVERLAY_Y_AND_UV_SWAP:
  624. cmd |= OCMD_Y_AND_UV_SWAP;
  625. break;
  626. }
  627. }
  628. return cmd;
  629. }
/*
 * Show 'new_bo' on the overlay with the given (already validated) params:
 * release the previous frame, pin the new one for scanout, program the
 * memory-mapped overlay registers and queue the flip.  On success the
 * previously displayed bo is parked in old_vid_bo until the flip retires;
 * on error the new bo is unpinned again.  Caller holds struct_mutex and
 * the mode_config mutex.
 */
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
				      struct drm_i915_gem_object *new_bo,
				      struct put_image_params *params)
{
	int ret, tmp_width;
	struct overlay_registers __iomem *regs;
	bool scale_changed = false;
	struct drm_device *dev = overlay->dev;
	u32 swidth, swidthsw, sheight, ostride;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
	BUG_ON(!overlay);

	ret = intel_overlay_release_old_vid(overlay);
	if (ret != 0)
		return ret;

	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
	if (ret != 0)
		return ret;

	ret = i915_gem_object_put_fence(new_bo);
	if (ret)
		goto out_unpin;

	/* first frame after enable: program OCONFIG and turn the hw on */
	if (!overlay->active) {
		u32 oconfig;
		regs = intel_overlay_map_regs(overlay);
		if (!regs) {
			ret = -ENOMEM;
			goto out_unpin;
		}
		oconfig = OCONF_CC_OUT_8BIT;
		if (IS_GEN4(overlay->dev))
			oconfig |= OCONF_CSC_MODE_BT709;
		oconfig |= overlay->crtc->pipe == 0 ?
			OCONF_PIPE_A : OCONF_PIPE_B;
		iowrite32(oconfig, &regs->OCONFIG);
		intel_overlay_unmap_regs(overlay, regs);

		ret = intel_overlay_on(overlay);
		if (ret != 0)
			goto out_unpin;
	}

	regs = intel_overlay_map_regs(overlay);
	if (!regs) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	/* destination window position and size on the crtc */
	iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
	iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);

	/* SWIDTHSW is computed over bytes for packed formats, pixels for planar */
	if (params->format & I915_OVERLAY_YUV_PACKED)
		tmp_width = packed_width_bytes(params->format, params->src_w);
	else
		tmp_width = params->src_w;

	swidth = params->src_w;
	swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
	sheight = params->src_h;
	iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
	ostride = params->stride_Y;

	/* planar formats carry the (subsampled) UV plane info in the
	 * high halves of the size/stride registers */
	if (params->format & I915_OVERLAY_YUV_PLANAR) {
		int uv_hscale = uv_hsubsampling(params->format);
		int uv_vscale = uv_vsubsampling(params->format);
		u32 tmp_U, tmp_V;
		swidth |= (params->src_w/uv_hscale) << 16;
		tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
				      params->src_w/uv_hscale);
		tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
				      params->src_w/uv_hscale);
		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
		sheight |= (params->src_h/uv_vscale) << 16;
		iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
		iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
		ostride |= params->stride_UV << 16;
	}

	iowrite32(swidth, &regs->SWIDTH);
	iowrite32(swidthsw, &regs->SWIDTHSW);
	iowrite32(sheight, &regs->SHEIGHT);
	iowrite32(ostride, &regs->OSTRIDE);

	scale_changed = update_scaling_factors(overlay, regs, params);

	update_colorkey(overlay, regs);

	iowrite32(overlay_cmd_reg(params), &regs->OCMD);

	intel_overlay_unmap_regs(overlay, regs);

	ret = intel_overlay_continue(overlay, scale_changed);
	if (ret)
		goto out_unpin;

	/* old frame stays pinned until intel_overlay_release_old_vid() */
	overlay->old_vid_bo = overlay->vid_bo;
	overlay->vid_bo = new_bo;

	return 0;

out_unpin:
	i915_gem_object_unpin(new_bo);
	return ret;
}
  718. int intel_overlay_switch_off(struct intel_overlay *overlay)
  719. {
  720. struct overlay_registers __iomem *regs;
  721. struct drm_device *dev = overlay->dev;
  722. int ret;
  723. BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  724. BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
  725. ret = intel_overlay_recover_from_interrupt(overlay);
  726. if (ret != 0)
  727. return ret;
  728. if (!overlay->active)
  729. return 0;
  730. ret = intel_overlay_release_old_vid(overlay);
  731. if (ret != 0)
  732. return ret;
  733. regs = intel_overlay_map_regs(overlay);
  734. iowrite32(0, &regs->OCMD);
  735. intel_overlay_unmap_regs(overlay, regs);
  736. ret = intel_overlay_off(overlay);
  737. if (ret != 0)
  738. return ret;
  739. intel_overlay_off_tail(overlay);
  740. return 0;
  741. }
  742. static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
  743. struct intel_crtc *crtc)
  744. {
  745. drm_i915_private_t *dev_priv = overlay->dev->dev_private;
  746. if (!crtc->active)
  747. return -EINVAL;
  748. /* can't use the overlay with double wide pipe */
  749. if (INTEL_INFO(overlay->dev)->gen < 4 &&
  750. (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
  751. return -EINVAL;
  752. return 0;
  753. }
  754. static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
  755. {
  756. struct drm_device *dev = overlay->dev;
  757. drm_i915_private_t *dev_priv = dev->dev_private;
  758. u32 pfit_control = I915_READ(PFIT_CONTROL);
  759. u32 ratio;
  760. /* XXX: This is not the same logic as in the xorg driver, but more in
  761. * line with the intel documentation for the i965
  762. */
  763. if (INTEL_INFO(dev)->gen >= 4) {
  764. /* on i965 use the PGM reg to read out the autoscaler values */
  765. ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
  766. } else {
  767. if (pfit_control & VERT_AUTO_SCALE)
  768. ratio = I915_READ(PFIT_AUTO_RATIOS);
  769. else
  770. ratio = I915_READ(PFIT_PGM_RATIOS);
  771. ratio >>= PFIT_VERT_SCALE_SHIFT;
  772. }
  773. overlay->pfit_vscale_ratio = ratio;
  774. }
  775. static int check_overlay_dst(struct intel_overlay *overlay,
  776. struct drm_intel_overlay_put_image *rec)
  777. {
  778. struct drm_display_mode *mode = &overlay->crtc->base.mode;
  779. if (rec->dst_x < mode->hdisplay &&
  780. rec->dst_x + rec->dst_width <= mode->hdisplay &&
  781. rec->dst_y < mode->vdisplay &&
  782. rec->dst_y + rec->dst_height <= mode->vdisplay)
  783. return 0;
  784. else
  785. return -EINVAL;
  786. }
  787. static int check_overlay_scaling(struct put_image_params *rec)
  788. {
  789. u32 tmp;
  790. /* downscaling limit is 8.0 */
  791. tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
  792. if (tmp > 7)
  793. return -EINVAL;
  794. tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
  795. if (tmp > 7)
  796. return -EINVAL;
  797. return 0;
  798. }
/*
 * Validate the source image described by @rec against the overlay
 * hardware limits and against the size of the backing object @new_bo.
 * For packed YUV formats the UV plane fields of @rec are cleared as a
 * side effect.
 *
 * Returns 0 when the image is usable, -EINVAL on any violation (or the
 * negative error from packed_depth_bytes() for an unknown packed format).
 */
static int check_overlay_src(struct drm_device *dev,
			     struct drm_intel_overlay_put_image *rec,
			     struct drm_i915_gem_object *new_bo)
{
	int uv_hscale = uv_hsubsampling(rec->flags);
	int uv_vscale = uv_vsubsampling(rec->flags);
	u32 stride_mask;
	int depth;
	u32 tmp;

	/* check src dimensions */
	if (IS_845G(dev) || IS_I830(dev)) {
		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
		    rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
			return -EINVAL;
	} else {
		if (rec->src_height > IMAGE_MAX_HEIGHT ||
		    rec->src_width > IMAGE_MAX_WIDTH)
			return -EINVAL;
	}

	/* better safe than sorry, use 4 as the maximal subsampling ratio */
	if (rec->src_height < N_VERT_Y_TAPS*4 ||
	    rec->src_width < N_HORIZ_Y_TAPS*4)
		return -EINVAL;

	/* check alignment constraints */
	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
	case I915_OVERLAY_RGB:
		/* not implemented */
		return -EINVAL;

	case I915_OVERLAY_YUV_PACKED:
		/* packed formats interleave chroma with luma, so vertical
		 * chroma subsampling is impossible */
		if (uv_vscale != 1)
			return -EINVAL;

		depth = packed_depth_bytes(rec->flags);
		if (depth < 0)
			return depth;

		/* ignore UV planes */
		rec->stride_UV = 0;
		rec->offset_U = 0;
		rec->offset_V = 0;
		/* check pixel alignment */
		if (rec->offset_Y % depth)
			return -EINVAL;
		break;

	case I915_OVERLAY_YUV_PLANAR:
		/* negative values here are error returns from the
		 * uv_*subsampling() helpers, i.e. a bogus format */
		if (uv_vscale < 0 || uv_hscale < 0)
			return -EINVAL;
		/* no offset restrictions for planar formats */
		break;

	default:
		return -EINVAL;
	}

	/* the source width must cover whole chroma samples */
	if (rec->src_width % uv_hscale)
		return -EINVAL;

	/* stride checking */
	if (IS_I830(dev) || IS_845G(dev))
		stride_mask = 255;
	else
		stride_mask = 63;

	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
		return -EINVAL;
	if (IS_GEN4(dev) && rec->stride_Y < 512)
		return -EINVAL;

	/* 4k max stride for planar luma, 8k for packed, 2k for chroma planes */
	tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
		4096 : 8192;
	if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
		return -EINVAL;

	/* check buffer dimensions */
	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
	case I915_OVERLAY_RGB:
	case I915_OVERLAY_YUV_PACKED:
		/* always 4 Y values per depth pixels */
		if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
			return -EINVAL;

		tmp = rec->stride_Y*rec->src_height;
		if (rec->offset_Y + tmp > new_bo->base.size)
			return -EINVAL;
		break;

	case I915_OVERLAY_YUV_PLANAR:
		if (rec->src_width > rec->stride_Y)
			return -EINVAL;
		if (rec->src_width/uv_hscale > rec->stride_UV)
			return -EINVAL;

		/* every plane must end inside the bo */
		tmp = rec->stride_Y * rec->src_height;
		if (rec->offset_Y + tmp > new_bo->base.size)
			return -EINVAL;

		tmp = rec->stride_UV * (rec->src_height / uv_vscale);
		if (rec->offset_U + tmp > new_bo->base.size ||
		    rec->offset_V + tmp > new_bo->base.size)
			return -EINVAL;
		break;
	}

	return 0;
}
  891. /**
  892. * Return the pipe currently connected to the panel fitter,
  893. * or -1 if the panel fitter is not present or not in use
  894. */
  895. static int intel_panel_fitter_pipe(struct drm_device *dev)
  896. {
  897. struct drm_i915_private *dev_priv = dev->dev_private;
  898. u32 pfit_control;
  899. /* i830 doesn't have a panel fitter */
  900. if (IS_I830(dev))
  901. return -1;
  902. pfit_control = I915_READ(PFIT_CONTROL);
  903. /* See if the panel fitter is in use */
  904. if ((pfit_control & PFIT_ENABLE) == 0)
  905. return -1;
  906. /* 965 can place panel fitter on either pipe */
  907. if (IS_GEN4(dev))
  908. return (pfit_control >> 29) & 0x3;
  909. /* older chips can only use pipe 1 */
  910. return 1;
  911. }
  912. int intel_overlay_put_image(struct drm_device *dev, void *data,
  913. struct drm_file *file_priv)
  914. {
  915. struct drm_intel_overlay_put_image *put_image_rec = data;
  916. drm_i915_private_t *dev_priv = dev->dev_private;
  917. struct intel_overlay *overlay;
  918. struct drm_mode_object *drmmode_obj;
  919. struct intel_crtc *crtc;
  920. struct drm_i915_gem_object *new_bo;
  921. struct put_image_params *params;
  922. int ret;
  923. /* No need to check for DRIVER_MODESET - we don't set it up then. */
  924. overlay = dev_priv->overlay;
  925. if (!overlay) {
  926. DRM_DEBUG("userspace bug: no overlay\n");
  927. return -ENODEV;
  928. }
  929. if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
  930. mutex_lock(&dev->mode_config.mutex);
  931. mutex_lock(&dev->struct_mutex);
  932. ret = intel_overlay_switch_off(overlay);
  933. mutex_unlock(&dev->struct_mutex);
  934. mutex_unlock(&dev->mode_config.mutex);
  935. return ret;
  936. }
  937. params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
  938. if (!params)
  939. return -ENOMEM;
  940. drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
  941. DRM_MODE_OBJECT_CRTC);
  942. if (!drmmode_obj) {
  943. ret = -ENOENT;
  944. goto out_free;
  945. }
  946. crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
  947. new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
  948. put_image_rec->bo_handle));
  949. if (&new_bo->base == NULL) {
  950. ret = -ENOENT;
  951. goto out_free;
  952. }
  953. mutex_lock(&dev->mode_config.mutex);
  954. mutex_lock(&dev->struct_mutex);
  955. if (new_bo->tiling_mode) {
  956. DRM_ERROR("buffer used for overlay image can not be tiled\n");
  957. ret = -EINVAL;
  958. goto out_unlock;
  959. }
  960. ret = intel_overlay_recover_from_interrupt(overlay);
  961. if (ret != 0)
  962. goto out_unlock;
  963. if (overlay->crtc != crtc) {
  964. struct drm_display_mode *mode = &crtc->base.mode;
  965. ret = intel_overlay_switch_off(overlay);
  966. if (ret != 0)
  967. goto out_unlock;
  968. ret = check_overlay_possible_on_crtc(overlay, crtc);
  969. if (ret != 0)
  970. goto out_unlock;
  971. overlay->crtc = crtc;
  972. crtc->overlay = overlay;
  973. /* line too wide, i.e. one-line-mode */
  974. if (mode->hdisplay > 1024 &&
  975. intel_panel_fitter_pipe(dev) == crtc->pipe) {
  976. overlay->pfit_active = 1;
  977. update_pfit_vscale_ratio(overlay);
  978. } else
  979. overlay->pfit_active = 0;
  980. }
  981. ret = check_overlay_dst(overlay, put_image_rec);
  982. if (ret != 0)
  983. goto out_unlock;
  984. if (overlay->pfit_active) {
  985. params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
  986. overlay->pfit_vscale_ratio);
  987. /* shifting right rounds downwards, so add 1 */
  988. params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
  989. overlay->pfit_vscale_ratio) + 1;
  990. } else {
  991. params->dst_y = put_image_rec->dst_y;
  992. params->dst_h = put_image_rec->dst_height;
  993. }
  994. params->dst_x = put_image_rec->dst_x;
  995. params->dst_w = put_image_rec->dst_width;
  996. params->src_w = put_image_rec->src_width;
  997. params->src_h = put_image_rec->src_height;
  998. params->src_scan_w = put_image_rec->src_scan_width;
  999. params->src_scan_h = put_image_rec->src_scan_height;
  1000. if (params->src_scan_h > params->src_h ||
  1001. params->src_scan_w > params->src_w) {
  1002. ret = -EINVAL;
  1003. goto out_unlock;
  1004. }
  1005. ret = check_overlay_src(dev, put_image_rec, new_bo);
  1006. if (ret != 0)
  1007. goto out_unlock;
  1008. params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
  1009. params->stride_Y = put_image_rec->stride_Y;
  1010. params->stride_UV = put_image_rec->stride_UV;
  1011. params->offset_Y = put_image_rec->offset_Y;
  1012. params->offset_U = put_image_rec->offset_U;
  1013. params->offset_V = put_image_rec->offset_V;
  1014. /* Check scaling after src size to prevent a divide-by-zero. */
  1015. ret = check_overlay_scaling(params);
  1016. if (ret != 0)
  1017. goto out_unlock;
  1018. ret = intel_overlay_do_put_image(overlay, new_bo, params);
  1019. if (ret != 0)
  1020. goto out_unlock;
  1021. mutex_unlock(&dev->struct_mutex);
  1022. mutex_unlock(&dev->mode_config.mutex);
  1023. kfree(params);
  1024. return 0;
  1025. out_unlock:
  1026. mutex_unlock(&dev->struct_mutex);
  1027. mutex_unlock(&dev->mode_config.mutex);
  1028. drm_gem_object_unreference_unlocked(&new_bo->base);
  1029. out_free:
  1030. kfree(params);
  1031. return ret;
  1032. }
  1033. static void update_reg_attrs(struct intel_overlay *overlay,
  1034. struct overlay_registers __iomem *regs)
  1035. {
  1036. iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
  1037. &regs->OCLRC0);
  1038. iowrite32(overlay->saturation, &regs->OCLRC1);
  1039. }
  1040. static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
  1041. {
  1042. int i;
  1043. if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
  1044. return false;
  1045. for (i = 0; i < 3; i++) {
  1046. if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
  1047. return false;
  1048. }
  1049. return true;
  1050. }
  1051. static bool check_gamma5_errata(u32 gamma5)
  1052. {
  1053. int i;
  1054. for (i = 0; i < 3; i++) {
  1055. if (((gamma5 >> i*8) & 0xff) == 0x80)
  1056. return false;
  1057. }
  1058. return true;
  1059. }
  1060. static int check_gamma(struct drm_intel_overlay_attrs *attrs)
  1061. {
  1062. if (!check_gamma_bounds(0, attrs->gamma0) ||
  1063. !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
  1064. !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
  1065. !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
  1066. !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
  1067. !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
  1068. !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
  1069. return -EINVAL;
  1070. if (!check_gamma5_errata(attrs->gamma5))
  1071. return -EINVAL;
  1072. return 0;
  1073. }
/**
 * intel_overlay_attrs - DRM_I915_OVERLAY_ATTRS ioctl handler
 * @dev: drm device
 * @data: ioctl payload, a struct drm_intel_overlay_attrs
 * @file_priv: drm file (unused here)
 *
 * Without I915_OVERLAY_UPDATE_ATTRS the current attribute values (and,
 * on non-gen2, the gamma registers) are copied back to userspace. With
 * it, the color key, brightness, contrast and saturation are validated
 * and written to the register file; I915_OVERLAY_UPDATE_GAMMA in
 * addition reprograms the gamma ramp, which is only allowed while the
 * overlay is off.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_overlay_attrs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_intel_overlay_attrs *attrs = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_overlay *overlay;
	struct overlay_registers __iomem *regs;
	int ret;

	/* No need to check for DRIVER_MODESET - we don't set it up then. */
	overlay = dev_priv->overlay;
	if (!overlay) {
		DRM_DEBUG("userspace bug: no overlay\n");
		return -ENODEV;
	}

	mutex_lock(&dev->mode_config.mutex);
	mutex_lock(&dev->struct_mutex);

	/* default error for the bare "goto out_unlock" validation failures */
	ret = -EINVAL;
	if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
		/* read-back path: report the cached attribute values */
		attrs->color_key = overlay->color_key;
		attrs->brightness = overlay->brightness;
		attrs->contrast = overlay->contrast;
		attrs->saturation = overlay->saturation;

		if (!IS_GEN2(dev)) {
			/* gen2 has no gamma registers to report */
			attrs->gamma0 = I915_READ(OGAMC0);
			attrs->gamma1 = I915_READ(OGAMC1);
			attrs->gamma2 = I915_READ(OGAMC2);
			attrs->gamma3 = I915_READ(OGAMC3);
			attrs->gamma4 = I915_READ(OGAMC4);
			attrs->gamma5 = I915_READ(OGAMC5);
		}
	} else {
		/* update path: range-check everything before touching state */
		if (attrs->brightness < -128 || attrs->brightness > 127)
			goto out_unlock;
		if (attrs->contrast > 255)
			goto out_unlock;
		if (attrs->saturation > 1023)
			goto out_unlock;

		overlay->color_key = attrs->color_key;
		overlay->brightness = attrs->brightness;
		overlay->contrast = attrs->contrast;
		overlay->saturation = attrs->saturation;

		regs = intel_overlay_map_regs(overlay);
		if (!regs) {
			ret = -ENOMEM;
			goto out_unlock;
		}

		update_reg_attrs(overlay, regs);

		intel_overlay_unmap_regs(overlay, regs);

		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
			/* no gamma hardware on gen2 */
			if (IS_GEN2(dev))
				goto out_unlock;

			/* gamma may only change while the overlay is off */
			if (overlay->active) {
				ret = -EBUSY;
				goto out_unlock;
			}

			ret = check_gamma(attrs);
			if (ret)
				goto out_unlock;

			I915_WRITE(OGAMC0, attrs->gamma0);
			I915_WRITE(OGAMC1, attrs->gamma1);
			I915_WRITE(OGAMC2, attrs->gamma2);
			I915_WRITE(OGAMC3, attrs->gamma3);
			I915_WRITE(OGAMC4, attrs->gamma4);
			I915_WRITE(OGAMC5, attrs->gamma5);
		}
	}

	ret = 0;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
/**
 * intel_setup_overlay - one-time initialisation of overlay support
 * @dev: drm device
 *
 * Allocates the struct intel_overlay and a one-page object backing the
 * overlay register file — a physically contiguous object on platforms
 * where OVERLAY_NEEDS_PHYSICAL() holds, a pinned GTT object otherwise.
 * Default attribute values are programmed and the overlay is published
 * in dev_priv->overlay. Failure is silent except for error messages:
 * the driver simply runs without overlay support.
 */
void intel_setup_overlay(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_overlay *overlay;
	struct drm_i915_gem_object *reg_bo;
	struct overlay_registers __iomem *regs;
	int ret;

	if (!HAS_OVERLAY(dev))
		return;

	overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
	if (!overlay)
		return;

	mutex_lock(&dev->struct_mutex);
	if (WARN_ON(dev_priv->overlay))
		goto out_free;

	overlay->dev = dev;

	/* one page is enough for the whole register file */
	reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
	if (!reg_bo)
		goto out_free;
	overlay->reg_bo = reg_bo;

	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
		/* register file accessed through a physical object;
		 * flip_addr is its bus address */
		ret = i915_gem_attach_phys_object(dev, reg_bo,
						  I915_GEM_PHYS_OVERLAY_REGS,
						  PAGE_SIZE);
		if (ret) {
			DRM_ERROR("failed to attach phys overlay regs\n");
			goto out_free_bo;
		}
		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
	} else {
		/* register file lives in the GTT; pin it and use the
		 * GTT offset as flip address */
		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
		if (ret) {
			DRM_ERROR("failed to pin overlay register bo\n");
			goto out_free_bo;
		}
		overlay->flip_addr = reg_bo->gtt_offset;

		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
		if (ret) {
			DRM_ERROR("failed to move overlay register bo into the GTT\n");
			goto out_unpin_bo;
		}
	}

	/* init all values */
	overlay->color_key = 0x0101fe;
	overlay->brightness = -19;
	overlay->contrast = 75;
	overlay->saturation = 146;

	regs = intel_overlay_map_regs(overlay);
	if (!regs)
		goto out_unpin_bo;

	memset_io(regs, 0, sizeof(struct overlay_registers));
	update_polyphase_filter(regs);
	update_reg_attrs(overlay, regs);

	intel_overlay_unmap_regs(overlay, regs);

	dev_priv->overlay = overlay;
	mutex_unlock(&dev->struct_mutex);
	DRM_INFO("initialized overlay support\n");
	return;

out_unpin_bo:
	/* only the non-physical path pinned the bo */
	if (!OVERLAY_NEEDS_PHYSICAL(dev))
		i915_gem_object_unpin(reg_bo);
out_free_bo:
	drm_gem_object_unreference(&reg_bo->base);
out_free:
	mutex_unlock(&dev->struct_mutex);
	kfree(overlay);
	return;
}
  1214. void intel_cleanup_overlay(struct drm_device *dev)
  1215. {
  1216. drm_i915_private_t *dev_priv = dev->dev_private;
  1217. if (!dev_priv->overlay)
  1218. return;
  1219. /* The bo's should be free'd by the generic code already.
  1220. * Furthermore modesetting teardown happens beforehand so the
  1221. * hardware should be off already */
  1222. BUG_ON(dev_priv->overlay->active);
  1223. drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
  1224. kfree(dev_priv->overlay);
  1225. }
  1226. #ifdef CONFIG_DEBUG_FS
  1227. #include <linux/seq_file.h>
/*
 * Snapshot of the overlay state taken at GPU error time, printed later
 * by intel_overlay_print_error_state().
 */
struct intel_overlay_error_state {
	struct overlay_registers regs;	/* copy of the whole register file */
	unsigned long base;		/* location of the register file
					 * (kernel vaddr or gtt offset,
					 * depending on the platform) */
	u32 dovsta;			/* DOVSTA register at capture time */
	u32 isr;			/* ISR register at capture time */
};
  1234. static struct overlay_registers __iomem *
  1235. intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
  1236. {
  1237. drm_i915_private_t *dev_priv = overlay->dev->dev_private;
  1238. struct overlay_registers __iomem *regs;
  1239. if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
  1240. /* Cast to make sparse happy, but it's wc memory anyway, so
  1241. * equivalent to the wc io mapping on X86. */
  1242. regs = (struct overlay_registers __iomem *)
  1243. overlay->reg_bo->phys_obj->handle->vaddr;
  1244. else
  1245. regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
  1246. overlay->reg_bo->gtt_offset);
  1247. return regs;
  1248. }
  1249. static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
  1250. struct overlay_registers __iomem *regs)
  1251. {
  1252. if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
  1253. io_mapping_unmap_atomic(regs);
  1254. }
  1255. struct intel_overlay_error_state *
  1256. intel_overlay_capture_error_state(struct drm_device *dev)
  1257. {
  1258. drm_i915_private_t *dev_priv = dev->dev_private;
  1259. struct intel_overlay *overlay = dev_priv->overlay;
  1260. struct intel_overlay_error_state *error;
  1261. struct overlay_registers __iomem *regs;
  1262. if (!overlay || !overlay->active)
  1263. return NULL;
  1264. error = kmalloc(sizeof(*error), GFP_ATOMIC);
  1265. if (error == NULL)
  1266. return NULL;
  1267. error->dovsta = I915_READ(DOVSTA);
  1268. error->isr = I915_READ(ISR);
  1269. if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
  1270. error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
  1271. else
  1272. error->base = overlay->reg_bo->gtt_offset;
  1273. regs = intel_overlay_map_regs_atomic(overlay);
  1274. if (!regs)
  1275. goto err;
  1276. memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
  1277. intel_overlay_unmap_regs_atomic(overlay, regs);
  1278. return error;
  1279. err:
  1280. kfree(error);
  1281. return NULL;
  1282. }
  1283. void
  1284. intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
  1285. {
  1286. seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
  1287. error->dovsta, error->isr);
  1288. seq_printf(m, " Register file at 0x%08lx:\n",
  1289. error->base);
  1290. #define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x)
  1291. P(OBUF_0Y);
  1292. P(OBUF_1Y);
  1293. P(OBUF_0U);
  1294. P(OBUF_0V);
  1295. P(OBUF_1U);
  1296. P(OBUF_1V);
  1297. P(OSTRIDE);
  1298. P(YRGB_VPH);
  1299. P(UV_VPH);
  1300. P(HORZ_PH);
  1301. P(INIT_PHS);
  1302. P(DWINPOS);
  1303. P(DWINSZ);
  1304. P(SWIDTH);
  1305. P(SWIDTHSW);
  1306. P(SHEIGHT);
  1307. P(YRGBSCALE);
  1308. P(UVSCALE);
  1309. P(OCLRC0);
  1310. P(OCLRC1);
  1311. P(DCLRKV);
  1312. P(DCLRKM);
  1313. P(SCLRKVH);
  1314. P(SCLRKVL);
  1315. P(SCLRKEN);
  1316. P(OCONFIG);
  1317. P(OCMD);
  1318. P(OSTART_0Y);
  1319. P(OSTART_1Y);
  1320. P(OSTART_0U);
  1321. P(OSTART_0V);
  1322. P(OSTART_1U);
  1323. P(OSTART_1V);
  1324. P(OTILEOFF_0Y);
  1325. P(OTILEOFF_1Y);
  1326. P(OTILEOFF_0U);
  1327. P(OTILEOFF_0V);
  1328. P(OTILEOFF_1U);
  1329. P(OTILEOFF_1V);
  1330. P(FASTHSCALE);
  1331. P(UVSCALEV);
  1332. #undef P
  1333. }
  1334. #endif