i915_dma.c

/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	if (INTEL_INFO(dev)->gen >= 4)
		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
					     0xf0;

	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
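/*
 * Legacy (DRI1/UMS) initialisation: map the SAREA provided by the X server,
 * optionally set up the render ring from the init parameters, and record the
 * framebuffer layout (cpp, front/back offsets) used by the flip and blit
 * paths below. DRI2 userspace typically has no SAREA and skips most of this.
 */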
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
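/*
 * Roughly how the decode below works: bits 31:29 of each dword select the
 * command client (0 = MI, 2 = 2D/blitter, 3 = 3D/render); the remaining
 * opcode and length fields are then used to compute the command's size in
 * dwords so the scan can step to the next command. A return value of 0
 * marks the command as disallowed and aborts the whole buffer.
 */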
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.  For now, do it in both places:
 */
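/*
 * The "breadcrumb" is a monotonically increasing submission counter. The
 * MI_STORE_DWORD_INDEX below makes the GPU write dev_priv->counter into the
 * hardware status page at I915_BREADCRUMB_INDEX, and the ioctls read it
 * back via READ_BREADCRUMB() to report the last dispatch that completed.
 */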
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
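/*
 * Legacy page flip: emit a CMD_OP_DISPLAYBUFFER_INFO with ASYNC_FLIP that
 * alternates between back_offset and front_offset, wait for the plane A
 * flip to complete, then store a breadcrumb and mirror the current page
 * into the SAREA so DRI1 clients know which buffer is being scanned out.
 */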
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
static int i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return intel_wait_ring_buffer(ring, ring->size - 8);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
	case I915_PARAM_HAS_BLT:
		value = HAS_BLT(dev);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}
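/*
 * On chipsets that need a GTT-based hardware status page (I915_NEED_GFX_HWS),
 * legacy userspace passes in a graphics address; the driver ioremaps the
 * corresponding range of the AGP aperture, zeroes the page, and points
 * HWS_PGA at it. With KMS this is handled internally, hence the WARN below.
 */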
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr = dev_priv->hws_map.handle;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
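/*
 * MCHBAR is the memory controller hub's register window, configured through
 * the host bridge found above: a base at config offset 0x44 on pre-965 parts
 * and a 64-bit base at 0x48 on gen4+, 16KB in size. On 915G/915GM the enable
 * bit lives in the DEVEN register rather than bit 0 of the BAR itself, which
 * is what the setup/teardown helpers below have to handle separately.
 */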
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
/* Setup MCHBAR if possible; set mchbar_need_disable when it should be
 * disabled again on teardown.
 */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}
#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)
/**
 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
 *                       a physical one
 * @dev: drm device
 * @offset: address to translate
 *
 * Some chip functions require allocations from stolen space and need the
 * physical address of the memory in question.
 */
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev_priv->bridge_dev;
	u32 base;

#if 0
	/* On the machines I have tested the Graphics Base of Stolen Memory
	 * is unreliable, so compute the base by subtracting the stolen memory
	 * from the Top of Low Usable DRAM which is where the BIOS places
	 * the graphics stolen memory.
	 */
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		/* top 32bits are reserved = 0 */
		pci_read_config_dword(pdev, 0xA4, &base);
	} else {
		/* XXX presume 8xx is the same as i915 */
		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
	}
#else
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		u16 val;
		pci_read_config_word(pdev, 0xb0, &val);
		base = val >> 4 << 20;
	} else {
		u8 val;
		pci_read_config_byte(pdev, 0x9c, &val);
		base = val >> 3 << 27;
	}
	base -= dev_priv->mm.gtt->stolen_size;
#endif

	return base + offset;
}
static void i915_warn_stolen(struct drm_device *dev)
{
	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}

static void i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	unsigned long cfb_base;
	unsigned long ll_base = 0;

	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
	if (compressed_fb)
		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
	if (!compressed_fb)
		goto err;

	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
	if (!cfb_base)
		goto err_fb;

	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
						    4096, 4096, 0);
		if (compressed_llb)
			compressed_llb = drm_mm_get_block(compressed_llb,
							  4096, 4096);
		if (!compressed_llb)
			goto err_fb;

		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
		if (!ll_base)
			goto err_llb;
	}

	dev_priv->cfb_size = size;

	intel_disable_fbc(dev);
	dev_priv->compressed_fb = compressed_fb;
	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
		dev_priv->compressed_llb = compressed_llb;
	}

	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
		      cfb_base, ll_base, size >> 20);
	return;

err_llb:
	drm_mm_put_block(compressed_llb);
err_fb:
	drm_mm_put_block(compressed_fb);
err:
	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
	i915_warn_stolen(dev);
}
static void i915_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_mm_put_block(dev_priv->compressed_fb);
	if (dev_priv->compressed_llb)
		drm_mm_put_block(dev_priv->compressed_llb);
}

/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
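/*
 * vga_switcheroo callbacks: when the mux hands the display to us we force
 * the PCI device to D0 and run the normal resume path; when it takes it away
 * we run the suspend path. Switching is only allowed while nobody has the
 * DRM device open (open_count == 0).
 */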
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "i915: switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		printk(KERN_ERR "i915: switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long prealloc_size, gtt_size, mappable_size;
	int ret = 0;

	prealloc_size = dev_priv->mm.gtt->stolen_size;
	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		goto out;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Leave 1M for line length buffer & misc. */

		/* Try to get a 32M buffer... */
		if (prealloc_size > (36*1024*1024))
			cfb_size = 32*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}

	/* Allow hardware batchbuffers unless told otherwise. */
	dev_priv->allow_batchbuffer = 1;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto cleanup_ringbuffer;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     NULL,
					     i915_switcheroo_can_switch);
	if (ret)
		goto cleanup_vga_client;

	/* IIR "flip pending" bit means done if this bit is set */
	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
		dev_priv->flip_pending_is_done = true;

	intel_modeset_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}
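/*
 * The FSB and memory frequencies read back here (from CLKCFG on Pineview and
 * from the DDR/CSI PLL registers on Ironlake) are cached in dev_priv. On
 * Ironlake they feed the cparams lookup used by i915_chipset_val() further
 * down; the Pineview values and DDR3 flag are used elsewhere in the driver
 * (e.g. for watermark setup).
 */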
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
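/*
 * i915_chipset_val() estimates chipset power draw from the DMIEC/DDREC/CSIEC
 * energy counters. The (i, t) pair of each cparams entry is matched against
 * the cached c_m and r_t values, giving slope m and offset c, so the value
 * returned below is roughly (m * counts_per_ms + c) / 10.
 */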
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	return ret;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
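/*
 * pvid_to_extvid() maps a PXVID field (read from PXVFREQ in i915_gfx_val())
 * to an extrapolated voltage via the table below, in the table's ".1 mil"
 * units (presumably tenths of a millivolt); the .vm column is used on mobile
 * parts and .vd on desktop parts.
 */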
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		{ 0, 0, },
		{ 375, 0, },
		{ 500, 0, },
		{ 625, 0, },
		{ 750, 0, },
		{ 875, 0, },
		{ 1000, 0, },
		{ 1125, 0, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4250, 3125, },
		{ 4375, 3250, },
		{ 4500, 3375, },
		{ 4625, 3500, },
		{ 4750, 3625, },
		{ 4875, 3750, },
		{ 5000, 3875, },
		{ 5125, 4000, },
		{ 5250, 4125, },
		{ 5375, 4250, },
		{ 5500, 4375, },
		{ 5625, 4500, },
		{ 5750, 4625, },
		{ 5875, 4750, },
		{ 6000, 4875, },
		{ 6125, 5000, },
		{ 6250, 5125, },
		{ 6375, 5250, },
		{ 6500, 5375, },
		{ 6625, 5500, },
		{ 6750, 5625, },
		{ 6875, 5750, },
		{ 7000, 5875, },
		{ 7125, 6000, },
		{ 7250, 6125, },
		{ 7375, 6250, },
		{ 7500, 6375, },
		{ 7625, 6500, },
		{ 7750, 6625, },
		{ 7875, 6750, },
		{ 8000, 6875, },
		{ 8125, 7000, },
		{ 8250, 7125, },
		{ 8375, 7250, },
		{ 8500, 7375, },
		{ 8625, 7500, },
		{ 8750, 7625, },
		{ 8875, 7750, },
		{ 9000, 7875, },
		{ 9125, 8000, },
		{ 9250, 8125, },
		{ 9375, 8250, },
		{ 9500, 8375, },
		{ 9625, 8500, },
		{ 9750, 8625, },
		{ 9875, 8750, },
		{ 10000, 8875, },
		{ 10125, 9000, },
		{ 10250, 9125, },
		{ 10375, 9250, },
		{ 10500, 9375, },
		{ 10625, 9500, },
		{ 10750, 9625, },
		{ 10875, 9750, },
		{ 11000, 9875, },
		{ 11125, 10000, },
		{ 11250, 10125, },
		{ 11375, 10250, },
		{ 11500, 10375, },
		{ 11625, 10500, },
		{ 11750, 10625, },
		{ 11875, 10750, },
		{ 12000, 10875, },
		{ 12125, 11000, },
		{ 12250, 11125, },
		{ 12375, 11250, },
		{ 12500, 11375, },
		{ 12625, 11500, },
		{ 12750, 11625, },
		{ 12875, 11750, },
		{ 13000, 11875, },
		{ 13125, 12000, },
		{ 13250, 12125, },
		{ 13375, 12250, },
		{ 13500, 12375, },
		{ 13625, 12500, },
		{ 13750, 12625, },
		{ 13875, 12750, },
		{ 14000, 12875, },
		{ 14125, 13000, },
		{ 14250, 13125, },
		{ 14375, 13250, },
		{ 14500, 13375, },
		{ 14625, 13500, },
		{ 14750, 13625, },
		{ 14875, 13750, },
		{ 15000, 13875, },
		{ 15125, 14000, },
		{ 15250, 14125, },
		{ 15375, 14250, },
		{ 15500, 14375, },
		{ 15625, 14500, },
		{ 15750, 14625, },
		{ 15875, 14750, },
		{ 16000, 14875, },
		{ 16125, 15000, },
	};
	if (dev_priv->info->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->last_count2) {
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->gfx_power = diff;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}
/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
        struct drm_i915_private *dev_priv;
        unsigned long chipset_val, graphics_val, ret = 0;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev)
                goto out_unlock;
        dev_priv = i915_mch_dev;

        chipset_val = i915_chipset_val(dev_priv);
        graphics_val = i915_gfx_val(dev_priv);

        ret = chipset_val + graphics_val;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = true;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev) {
                ret = false;
                goto out_unlock;
        }
        dev_priv = i915_mch_dev;
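
        /*
         * Smaller delay values correspond to higher frequencies, so raising
         * the limit means stepping max_delay down toward fmax (and
         * i915_gpu_lower() below steps it up toward min_delay).
         */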
        if (dev_priv->max_delay > dev_priv->fmax)
                dev_priv->max_delay--;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = true;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev) {
                ret = false;
                goto out_unlock;
        }
        dev_priv = i915_mch_dev;

        if (dev_priv->max_delay < dev_priv->min_delay)
                dev_priv->max_delay++;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU busyness to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = false;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev)
                goto out_unlock;
        dev_priv = i915_mch_dev;

        ret = dev_priv->busy;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = true;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev) {
                ret = false;
                goto out_unlock;
        }
        dev_priv = i915_mch_dev;

        dev_priv->max_delay = dev_priv->fstart;

        if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
                ret = false;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
        void (*link)(void);

        link = symbol_get(ips_link_to_i915_driver);
        if (link) {
                link();
                symbol_put(ips_link_to_i915_driver);
        }
}
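
/*
 * The intel_ips driver resolves the symbols exported above in the same way,
 * e.g. (an illustrative sketch only, not the exact intel_ips code):
 *
 *      unsigned long (*read_mch_val)(void) = symbol_get(i915_read_mch_val);
 *      if (read_mch_val) {
 *              unsigned long val = read_mch_val();
 *              symbol_put(i915_read_mch_val);
 *      }
 *
 * so neither module needs a hard link-time dependency on the other.
 */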
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_i915_private *dev_priv;
        int ret = 0, mmio_bar;
        uint32_t agp_size;

        /* i915 has 4 more counters */
        dev->counters += 4;
        dev->types[6] = _DRM_STAT_IRQ;
        dev->types[7] = _DRM_STAT_PRIMARY;
        dev->types[8] = _DRM_STAT_SECONDARY;
        dev->types[9] = _DRM_STAT_DMA;

        dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;
        dev_priv->dev = dev;
        dev_priv->info = (struct intel_device_info *) flags;

        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                goto free_priv;
        }

        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN2(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
        if (!dev_priv->regs) {
                DRM_ERROR("failed to map registers\n");
                ret = -EIO;
                goto put_bridge;
        }
        dev_priv->mm.gtt = intel_gtt_get();
        if (!dev_priv->mm.gtt) {
                DRM_ERROR("Failed to initialize GTT\n");
                ret = -ENODEV;
                /* the GTT mapping hasn't been created yet, only regs */
                goto out_rmmap;
        }
        agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

        dev_priv->mm.gtt_mapping =
                io_mapping_create_wc(dev->agp->base, agp_size);
        if (dev_priv->mm.gtt_mapping == NULL) {
                ret = -EIO;
                goto out_rmmap;
        }

        /* Set up a WC MTRR for non-PAT systems.  This is more common than
         * one would think, because the kernel disables PAT on first
         * generation Core chips because WC PAT gets overridden by a UC
         * MTRR if present.  Even if a UC MTRR isn't present.
         */
        dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
                                         agp_size,
                                         MTRR_TYPE_WRCOMB, 1);
        if (dev_priv->mm.gtt_mtrr < 0) {
                DRM_INFO("MTRR allocation failed.  Graphics "
                         "performance may suffer.\n");
        }
        /* The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
         * by the GPU. i915_gem_retire_requests() is called directly when we
         * need high-priority retirement, such as waiting for an explicit
         * bo.
         *
         * It is also used for periodic low-priority events, such as
         * idle-timers and recording error state.
         *
         * All tasks on the workqueue are expected to acquire the dev mutex
         * so there is no point in running more than one instance of the
         * workqueue at any time: max_active = 1 and NON_REENTRANT.
         */
        dev_priv->wq = alloc_workqueue("i915",
                                       WQ_UNBOUND | WQ_NON_REENTRANT,
                                       1);
        if (dev_priv->wq == NULL) {
                DRM_ERROR("Failed to create our workqueue.\n");
                ret = -ENOMEM;
                goto out_iomapfree;
        }
        /* enable GEM by default */
        dev_priv->has_gem = 1;

        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
        }

        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
        intel_setup_gmbus(dev);
        intel_opregion_setup(dev);

        /* Make sure the bios did its job and set up vital registers */
        intel_setup_bios(dev);

        i915_gem_load(dev);

        /* Init HWS */
        if (!I915_NEED_GFX_HWS(dev)) {
                ret = i915_init_phys_hws(dev);
                if (ret)
                        goto out_gem_unload;
        }

        if (IS_PINEVIEW(dev))
                i915_pineview_get_mem_freq(dev);
        else if (IS_GEN5(dev))
                i915_ironlake_get_mem_freq(dev);
        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
         * according to the published specs.  It doesn't appear to function
         * correctly in testing on 945G.
         * This may be a side effect of MSI having been made available for PEG
         * and the registers being closely associated.
         *
         * According to chipset errata, on the 965GM, MSI interrupts may
         * be lost or delayed, but we use them anyway to avoid
         * stuck interrupts on some machines.
         */
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->error_lock);
        dev_priv->trace_irq_seqno = 0;

        ret = drm_vblank_init(dev, I915_NUM_PIPE);
        if (ret)
                goto out_gem_unload;

        /* Start out suspended */
        dev_priv->mm.suspended = 1;

        intel_detect_pch(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = i915_load_modeset_init(dev);
                if (ret < 0) {
                        DRM_ERROR("failed to init modeset\n");
                        goto out_gem_unload;
                }
        }

        /* Must be done after probing outputs */
        intel_opregion_init(dev);
        acpi_video_register();

        setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
                    (unsigned long) dev);

        spin_lock(&mchdev_lock);
        i915_mch_dev = dev_priv;
        dev_priv->mchdev_lock = &mchdev_lock;
        spin_unlock(&mchdev_lock);
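
        /*
         * From here on the IPS hooks above (i915_read_mch_val() and friends)
         * can find this device, so let an already-loaded intel_ips know we
         * are up.
         */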
        ips_ping_for_i915_load();

        return 0;

out_gem_unload:
        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
        destroy_workqueue(dev_priv->wq);
out_iomapfree:
        io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
        pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
free_priv:
        kfree(dev_priv);
        return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        spin_lock(&mchdev_lock);
        i915_mch_dev = NULL;
        spin_unlock(&mchdev_lock);

        if (dev_priv->mm.inactive_shrinker.shrink)
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);

        mutex_lock(&dev->struct_mutex);
        ret = i915_gpu_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
        mutex_unlock(&dev->struct_mutex);

        /* Cancel the retire work handler, which should be idle now. */
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);

        io_mapping_free(dev_priv->mm.gtt_mapping);
        if (dev_priv->mm.gtt_mtrr >= 0) {
                mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
                         dev->agp->agp_info.aper_size * 1024 * 1024);
                dev_priv->mm.gtt_mtrr = -1;
        }

        acpi_video_unregister();

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_fbdev_fini(dev);
                intel_modeset_cleanup(dev);

                /*
                 * free the memory space allocated for the child device
                 * config parsed from VBT
                 */
                if (dev_priv->child_dev && dev_priv->child_dev_num) {
                        kfree(dev_priv->child_dev);
                        dev_priv->child_dev = NULL;
                        dev_priv->child_dev_num = 0;
                }

                vga_switcheroo_unregister_client(dev->pdev);
                vga_client_register(dev->pdev, NULL, NULL, NULL);
        }

        /* Free error state after interrupts are fully disabled. */
        del_timer_sync(&dev_priv->hangcheck_timer);
        cancel_work_sync(&dev_priv->error_work);
        i915_destroy_error_state(dev);

        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        intel_opregion_fini(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Flush any outstanding unpin_work. */
                flush_workqueue(dev_priv->wq);

                i915_gem_free_all_phys_object(dev);

                mutex_lock(&dev->struct_mutex);
                i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
                if (I915_HAS_FBC(dev) && i915_powersave)
                        i915_cleanup_compression(dev);
                drm_mm_takedown(&dev_priv->mm.stolen);

                intel_cleanup_overlay(dev);

                if (!I915_NEED_GFX_HWS(dev))
                        i915_free_hws(dev);
        }

        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);

        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);

        destroy_workqueue(dev_priv->wq);

        pci_dev_put(dev_priv->bridge_dev);
        kfree(dev->dev_private);

        return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;

        DRM_DEBUG_DRIVER("\n");
        file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                return -ENOMEM;

        file->driver_priv = file_priv;

        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);

        return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
                drm_fb_helper_restore();
                vga_switcheroo_process_delayed_switch();
                return;
        }

        i915_gem_lastclose(dev);

        if (dev_priv->agp_heap)
                i915_mem_takedown(&(dev_priv->agp_heap));

        i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        i915_gem_release(dev, file_priv);
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        kfree(file_priv);
}
struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate that every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
        return 1;
}