/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
	POWER_DOMAIN_VGA,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)

enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))
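/*
 * Illustrative usage (not part of the original header): the iterator
 * macros above would typically be used like this; "dev" must be in
 * scope for for_each_pipe(), and "crtc" names the CRTC being inspected.
 *
 *	enum pipe pipe;
 *	struct intel_encoder *encoder;
 *
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("pipe %c present\n", pipe_name(pipe));
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		DRM_DEBUG_KMS("encoder active on this crtc\n");
 */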
struct drm_i915_private;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A,
	DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2

struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;
};

struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
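/*
 * Illustrative call sketch (not from the original header): for a
 * hypothetical 24 bpp mode on a 4-lane DP link, with a 148500 kHz
 * pixel clock carried over a 270000 kHz link clock, the M/N values
 * would be computed as:
 *
 *	struct intel_link_m_n m_n;
 *
 *	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);
 */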
struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0
#define WATCH_GTT	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr;
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;
	u32 *active_bo_count, *pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	int hangcheck_score[I915_NUM_RINGS];
	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};

struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  uint32_t flags);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};
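/*
 * Illustrative sketch (not part of the original header): these hooks
 * are invoked through the per-device vtable, e.g. find_dpll() as
 * documented above; "dev_priv", "limit", "crtc", "target", "refclk"
 * and "clock" are assumed to exist in the caller.
 *
 *	bool ok = dev_priv->display.find_dpll(limit, crtc, target,
 *					      refclk, NULL, &clock);
 *	if (!ok)
 *		DRM_DEBUG_KMS("no suitable PLL dividers found\n");
 */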
struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
			    uint64_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	unsigned forcewake_count;

	struct delayed_work force_wake_work;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON
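/*
 * Illustrative re-expansion (not in the original header): the X-macro
 * above can emit code other than bitfields by swapping in a different
 * (func, sep) pair; PRINT_FLAG here is a hypothetical example that
 * dumps every capability flag of an "info" pointer.
 *
 *	#define PRINT_FLAG(name) pr_debug(#name ": %u\n", info->name)
 *	#define SEP_SEMICOLON ;
 *	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
 *	#undef PRINT_FLAG
 *	#undef SEP_SEMICOLON
 */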
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

typedef uint32_t gen6_gtt_pte_t;

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start;	/* Start offset always 0 for dri2 */
	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */

	struct {
		dma_addr_t addr;
		struct page *page;
	} scratch;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level);
	void (*clear_range)(struct i915_address_space *vm,
			    unsigned int first_entry,
			    unsigned int num_entries);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       unsigned int first_entry,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_address_space *vm);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;
	size_t stolen_size;		/* Total size of stolen memory */

	unsigned long mappable_end;	/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
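/*
 * Worked example (illustrative): with 4 KiB pages (PAGE_SHIFT == 12),
 * a 2 GiB global GTT gives
 *
 *	gtt_total_entries(gtt) == (2ULL << 30) >> 12 == 524288
 *
 * PTEs, i.e. one global PTE per page of GTT address space.
 */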
struct i915_hw_ppgtt {
	struct i915_address_space base;
	unsigned num_pd_entries;
	struct page **pt_pages;
	uint32_t pd_offset;
	dma_addr_t *pt_dma_addr;
	int (*enable)(struct drm_device *dev);
};

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding the object into, or after
 * unbinding it from, the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct kref ref;
	int id;
	bool is_initialized;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;

	struct list_head link;
};

struct i915_fbc {
	unsigned long size;
	unsigned int fb_id;
	enum plane plane;
	int y;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
		int interval;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM, /* disabled via module parameter */
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

struct i915_psr {
	bool sink_support;
	bool source_ok;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	u32 pm_iir;

	/* The below variables and all of the rps hw state are protected by
	 * dev->struct_mutex. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 rpe_delay;
	u8 rp1_delay;
	u8 rp0_delay;
	u8 hw_max;

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

/* Power well structure for haswell */
struct i915_power_well {
	struct drm_device *device;
	spinlock_t lock;
	/* power well enable/disable usage count */
	int count;
	int i915_request;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct i915_ums_state {
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int mm_suspended;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct timer_list hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long missed_irq_rings;

	/**
	 * State variable and reset counter controlling the reset flow
	 *
	 * Upper bits are for the reset counter. This counter is used by the
	 * wait_seqno code to notice, without taking any locks, that a reset
	 * event happened and that it needs to restart the entire ioctl (since
	 * most likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 *
	 * Lowest bit controls the reset state machine: Set means a reset is in
	 * progress. This state will (presuming we don't have any bugs) decay
	 * into either unset (successful reset) or the special WEDGED value (hw
	 * terminally sour). All waiters on the reset_queue will be woken when
	 * that happens.
	 */
	atomic_t reset_counter;

	/**
	 * Special values/flags for reset_counter
	 *
	 * Note that the code relies on
	 *	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
	 * being true.
	 */
#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			0xffffffff

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;
};
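/*
 * Illustrative sketch (not part of the original header) of the
 * lock-free waiter pattern described in the reset_counter comment
 * above: sample the counter before sleeping and bail out with -EAGAIN
 * if a reset begins or completes in the meantime.
 *
 *	unsigned reset_counter = atomic_read(&error->reset_counter);
 *
 *	if (reset_counter & I915_RESET_IN_PROGRESS_FLAG)
 *		return -EAGAIN;			(reset in progress)
 *	... sleep waiting for the seqno ...
 *	if (reset_counter != atomic_read(&error->reset_counter))
 *		return -EAGAIN;			(a reset fired meanwhile)
 */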
enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	/* MIPI DSI */
	struct {
		u16 panel_id;
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct hsw_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

/*
 * This struct tracks the state needed for the Package C8+ feature.
 *
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states.
 *
 * Our driver only allows PC8+ when all the outputs are disabled, the power well
 * is disabled and the GPU is idle. When these conditions are met, we manually
 * do the other conditions: disable the interrupts, clocks and switch LCPLL
 * refclk to Fclk.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6.
 *
 * The interrupt disabling is part of the requirements. We can only leave the
 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
 * can lock the machine.
 *
 * Ideally every piece of our code that needs PC8+ disabled would call
 * hsw_disable_package_c8, which would increment disable_count and prevent the
 * system from reaching PC8+. But we don't have a symmetric way to do this for
 * everything, so we have the requirements_met and gpu_idle variables. When we
 * switch requirements_met or gpu_idle to true we decrease disable_count, and
 * increase it in the opposite case. The requirements_met variable is true when
 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
 * variable is true when the GPU is idle.
 *
 * In addition to everything, we only actually enable PC8+ if disable_count
 * stays at zero for at least some seconds. This is implemented with the
 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
 * consecutive times when all screens are disabled and some background app
 * queries the state of our connectors, or we have some application constantly
 * waking up to use the GPU. Only after the enable_work function actually
 * enables PC8+ does the "enabled" variable become true, which means that it
 * can be false even if disable_count is 0.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs
 * and goes back to false exactly before we reenable the IRQs. We use this
 * variable to check if someone is trying to enable/disable IRQs while they're
 * supposed to be disabled. This shouldn't happen and we'll print some error
 * messages in case it happens, but if it actually happens we'll also update
 * the variables inside struct regsave so when we restore the IRQs they will
 * contain the latest expected values.
 *
 * For more, read "Display Sequences for Package C8" in our documentation.
 */
struct i915_package_c8 {
	bool requirements_met;
	bool gpu_idle;
	bool irqs_disabled;
	/* Only true after the delayed work task actually enables it. */
	bool enabled;
	int disable_count;
	struct mutex lock;
	struct delayed_work enable_work;

	struct {
		uint32_t deimr;
		uint32_t sdeimr;
		uint32_t gtimr;
		uint32_t gtier;
		uint32_t gen6_pmimr;
	} regsave;
};
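/*
 * Illustrative sketch (not in the original header) of the
 * disable_count scheme described above: the "allow PC8+" path drops
 * the count and, once it hits zero, arms the delayed enable_work; the
 * 5000 ms delay shown here is only a placeholder for the "some
 * seconds" the comment mentions, and the increment path is symmetric.
 *
 *	mutex_lock(&dev_priv->pc8.lock);
 *	if (--dev_priv->pc8.disable_count == 0)
 *		schedule_delayed_work(&dev_priv->pc8.enable_work,
 *				      msecs_to_jiffies(5000));
 *	mutex_unlock(&dev_priv->pc8.lock);
 */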
enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	atomic_t available; /* exclusive access to the device */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	atomic_t head, tail;
	wait_queue_head_t wq;
};

typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info *info;

	int relative_constants_mode;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	atomic_t irq_received;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask;
	u32 gt_irq_mask;
	u32 pm_irq_mask;

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct timer_list hotplug_reenable_timer;

	int num_plane;

	struct i915_fbc fbc;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	/* overlay */
	struct intel_overlay *overlay;
	unsigned int sprite_scaling_enabled;

	/* backlight */
	struct {
		int level;
		bool enabled;
		spinlock_t lock; /* bl registers and the above bl fields */
		struct backlight_device *device;
	} backlight;

	/* LVDS info */
	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VMA representing the global address space */

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;
	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	/* Haswell power well */
	struct i915_power_well power_well;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_I915_FBDEV
	/* the fbdev registered on this device */
	struct intel_fbdev *fbdev;
#endif

	/*
	 * The console may be contended at resume, but we don't
	 * want resume to block on it.
	 */
	struct work_struct console_resume_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	bool hw_contexts_disabled;
	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	struct i915_suspend_saved_registers regfile;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];

		/* current hardware state */
		struct hsw_wm_values hw;
	} wm;

	struct i915_package_c8 pc8;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif
} drm_i915_private_t;

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
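/*
 * Illustrative usage (not part of the original header): walking only
 * the rings that intel_ring_initialized() reports as present.
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG_DRIVER("ring %d is initialised\n", i);
 */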
  1258. enum hdmi_force_audio {
  1259. HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
  1260. HDMI_AUDIO_OFF, /* force turn off HDMI audio */
  1261. HDMI_AUDIO_AUTO, /* trust EDID */
  1262. HDMI_AUDIO_ON, /* force turn on HDMI audio */
  1263. };
  1264. #define I915_GTT_OFFSET_NONE ((u32)-1)
  1265. struct drm_i915_gem_object_ops {
  1266. /* Interface between the GEM object and its backing storage.
  1267. * get_pages() is called once prior to the use of the associated set
  1268. * of pages before to binding them into the GTT, and put_pages() is
  1269. * called after we no longer need them. As we expect there to be
  1270. * associated cost with migrating pages between the backing storage
  1271. * and making them available for the GPU (e.g. clflush), we may hold
  1272. * onto the pages after they are no longer referenced by the GPU
  1273. * in case they may be used again shortly (for example migrating the
  1274. * pages to a different memory domain within the GTT). put_pages()
  1275. * will therefore most likely be called when the object itself is
  1276. * being released or under memory pressure (where we attempt to
  1277. * reap pages for the shrinker).
  1278. */
  1279. int (*get_pages)(struct drm_i915_gem_object *);
  1280. void (*put_pages)(struct drm_i915_gem_object *);
  1281. };
struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	struct list_head ring_list;
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT.
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;
	unsigned int pin_display:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:3;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;

	struct sg_table *pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Context related to this request */
	struct i915_hw_context *ctx;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};
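/*
 * Illustrative sketch (assumed shape of the retire loop, not code from this
 * header): requests sit on a per-ring list in emission order, so retiring
 * walks the list and stops at the first seqno the GPU has not yet passed
 * (completed_seqno being the ring's last completed seqno):
 *
 *	list_for_each_entry(request, &ring->request_list, list) {
 *		if (!i915_seqno_passed(completed_seqno, request->seqno))
 *			break;
 *		(retire buffers up to request->tail)
 *	}
 */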
struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;

	struct {
		spinlock_t lock;
		struct list_head request_list;
		struct delayed_work idle_work;
	} mm;
	struct idr context_idr;

	struct i915_ctx_hang_stats hang_stats;
	atomic_t rps_wait_boost;
};
#define INTEL_INFO(dev) (to_i915(dev)->info)

#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
			 (dev)->pdev->device == 0x0152 || \
			 (dev)->pdev->device == 0x015a)
#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
			 (dev)->pdev->device == 0x0106 || \
			 (dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
			       ((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
		     ((dev)->pdev->device & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
			 ((dev)->pdev->device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
/*
 * The genX designation typically refers to the render engine, so render
 * capability-related checks should use IS_GEN, while display and other
 * checks have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for
 * particular chips, etc.).
 */
#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
#define RENDER_RING (1<<RCS)
#define BSD_RING (1<<VCS)
#define BLT_RING (1<<BCS)
#define VEBOX_RING (1<<VECS)
#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))

#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						       IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

#define HAS_IPS(dev) (IS_ULT(dev))

#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev))

#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00

#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))

#define GT_FREQUENCY_MULTIPLIER 50

#include "i915_trace.h"
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle and RC6 support is
 * enabled, and as soon as a new workload arrives the GPU wakes up again.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and the
 * one which brings the most power savings; deeper states save more power,
 * but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE (1<<0)
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
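/*
 * Illustrative sketch (not code from this header): the flags form a bitmask
 * of permitted states, so allowing RC6 and deep RC6 but not RC6pp would be:
 *
 *	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 *	if (rc6_mask & INTEL_RC6p_ENABLE)
 *		(also program the deep state)
 */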
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern int i915_enable_psr __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;
extern bool i915_fastboot __read_mostly;
extern int i915_enable_pc8 __read_mostly;
extern int i915_pc8_timeout __read_mostly;
extern bool i915_prefault_disable __read_mostly;

extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *box,
			 int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);

extern void intel_console_resume(struct work_struct *work);

/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);

extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);

int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     uint32_t alignment,
				     bool map_and_fenceable,
				     bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
		return sg_page_iter_page(&sg_iter);

	return NULL;
}

static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}

static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}
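/*
 * Illustrative usage (a sketch, not code from this header): code that needs
 * the backing pages to stay resident materialises them and then brackets its
 * access with a pin/unpin pair:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	(access pages via i915_gem_object_get_page(obj, n))
 *	i915_gem_object_unpin_pages(obj);
 */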
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
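/*
 * Worked example (illustrative): the signed subtraction makes the comparison
 * robust against 32-bit seqno wraparound. With seq1 == 2 and
 * seq2 == 0xfffffffe, seq1 - seq2 wraps to 4, so (int32_t)4 >= 0 and seq1 is
 * correctly treated as later even though it is numerically smaller.
 * Conversely, for seq1 == 0xfffffffe and seq2 == 2 the difference casts to
 * -4 and the test fails.
 */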
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
		return true;
	} else
		return false;
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}
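/*
 * Illustrative usage (a sketch, not code from this header): a caller that
 * needs the fence register to stay allocated while it uses a fenced mapping
 * pins it for the duration of the access:
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret)
 *		return ret;
 *	if (i915_gem_object_pin_fence(obj)) {
 *		(perform the fenced access)
 *		i915_gem_object_unpin_fence(obj);
 *	}
 */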
bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) == I915_WEDGED;
}
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj,
		       u32 *seqno);
#define i915_add_request(ring, seqno) \
	__i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
				 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				int id,
				int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);

void i915_gem_restore_fences(struct drm_device *dev);

unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);

/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      bool map_and_fenceable,
		      bool nonblocking)
{
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   map_and_fenceable, nonblocking);
}
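/*
 * Illustrative usage (a sketch, not code from this header): pinning an
 * object into the global GTT and fetching its offset for register
 * programming, then dropping the pin when the hardware no longer needs it:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 *	if (ret)
 *		return ret;
 *	offset = i915_gem_obj_ggtt_offset(obj);
 *	(program hardware with offset)
 *	i915_gem_object_unpin(obj);
 */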
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}
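/*
 * Illustrative usage (a sketch, not code from this header): these are plain
 * kref wrappers, so code that keeps a context pointer beyond the current
 * critical section pairs them, with the final unreference ending up in
 * i915_gem_context_free():
 *
 *	i915_gem_context_reference(ctx);	(e.g. when a request stores ctx)
 *	...
 *	i915_gem_context_unreference(ctx);	(when the request is retired)
 */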
struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct drm_device *dev,
				struct drm_file *file,
				u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
			       unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void intel_display_crc_init(struct drm_device *dev);
#else
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif

/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
struct intel_encoder;
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
/* On SNB platforms, the forcewake bit must be set before reading ring
 * registers, to prevent the GT core from powering down and returning
 * stale values.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
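/*
 * Illustrative usage (a sketch, not code from this header; RING_HEAD is
 * assumed from the register definitions): raw ring-register reads are
 * bracketed with a forcewake get/put pair so the GT stays powered up for
 * the duration of the access:
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	head = I915_READ(RING_HEAD(ring->mmio_base));
 *	gen6_gt_force_wake_put(dev_priv);
 */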
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);

#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
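/*
 * Illustrative usage (a sketch, not code from this header; IMR and irq_mask
 * are assumed names): POSTING_READ() flushes a posted MMIO write by reading
 * the register back, untraced, before relying on its side effects:
 *
 *	I915_WRITE(IMR, dev_priv->irq_mask);
 *	POSTING_READ(IMR);
 */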
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (HAS_PCH_SPLIT(dev))
		return CPU_VGACNTRL;
	else if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else
		return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}
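/*
 * The two timeout helpers below add one jiffy on top of the converted value
 * (rationale inferred, not stated in the original): the conversion rounds to
 * jiffy granularity and the first tick may fire almost immediately, so the
 * extra jiffy guarantees that at least the requested time elapses. The
 * result is clamped to MAX_JIFFY_OFFSET so it stays a valid timeout.
 */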
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

#endif