i915_gem.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
				  uint32_t read_domains,
				  uint32_t write_domain);
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
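
/**
 * Sets up the range of the GTT that GEM may allocate objects from.
 *
 * Both ends of the range must be page aligned; the range is handed to the
 * DRM memory-range manager, which hands out GTT space to objects later on.
 */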
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	mutex_lock(&dev->struct_mutex);

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
	    (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
		    args->gtt_end - args->gtt_start);

	dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int handle, ret;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	ssize_t read;
	loff_t offset;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
			args->size, &offset);
	if (read != args->size) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		if (read < 0)
			return read;
		else
			return -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */
static inline int
slow_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = io_mapping_map_wc(mapping, page_base);
	if (vaddr == NULL)
		return -EFAULT;
	unwritten = __copy_from_user(vaddr + page_offset,
				     user_data, length);
	io_mapping_unmap(vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}
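
/**
 * Writes the user's data into the object through the GTT aperture.
 *
 * The object is pinned and moved to the GTT domain, then copied a page at a
 * time: the non-faulting fast path is tried first, falling back to the
 * sleeping path when the source page isn't resident.
 */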
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		    struct drm_i915_gem_pwrite *args,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
				       page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  In this case, use the
		 * non-atomic function
		 */
		if (ret) {
			ret = slow_user_write (dev_priv->mm.gtt_mapping,
					       page_base, page_offset,
					       user_data, page_length);
			if (ret)
				goto fail;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
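
/**
 * Writes the user's data into the object through its shmem backing store.
 *
 * This is the fallback for the GTT pwrite path: the object is moved to the
 * CPU domain and the data is written with vfs_write().
 */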
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file_priv)
{
	int ret;
	loff_t offset;
	ssize_t written;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	written = vfs_write(obj->filp,
			    (char __user *)(uintptr_t) args->data_ptr,
			    args->size, &offset);
	if (written != args->size) {
		mutex_unlock(&dev->struct_mutex);
		if (written < 0)
			return written;
		else
			return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->tiling_mode == I915_TILING_NONE &&
	    dev->gtt_total != 0)
		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
	else
		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %d)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
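
/**
 * Releases the object's cached page list, marking the pages dirty and
 * accessed as needed before dropping their references.
 */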
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	if (obj_priv->page_list == NULL)
		return;

	for (i = 0; i < page_count; i++)
		if (obj_priv->page_list[i] != NULL) {
			if (obj_priv->dirty)
				set_page_dirty(obj_priv->page_list[i]);
			mark_page_accessed(obj_priv->page_list[i]);
			page_cache_release(obj_priv->page_list[i]);
		}
	obj_priv->dirty = 0;

	drm_free(obj_priv->page_list,
		 page_count * sizeof(struct page *),
		 DRM_MEM_DRIVER);
	obj_priv->page_list = NULL;
}
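
/**
 * Marks the object as in use by the GPU for the request with the given seqno,
 * taking a reference the first time it enters the active list.
 */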
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);
			}
		}
	}

	if (was_empty && !dev_priv->mm.suspended)
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	return seqno;
}

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this seqno.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			return;

#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else
			i915_gem_object_move_to_inactive(obj);
	}
}

/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}
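
/**
 * Periodic work handler: retires completed requests and reschedules itself
 * while requests remain outstanding.
 */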
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       dev_priv->mm.wedged);
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged)
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}
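
/**
 * Emits the flushes and invalidations needed for the given domain masks:
 * a chipset flush for the CPU domain, and an MI_FLUSH on the ring for the
 * GPU caches.
 */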
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
						     I915_GEM_DOMAIN_GTT)) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */
		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
static int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret = 0;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	BUG_ON(obj_priv->active);

	i915_gem_object_free_page_list(obj);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	return 0;
}
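
/**
 * Frees up GTT space by unbinding one object: an inactive buffer if one is
 * available, otherwise by waiting on outstanding requests or flushing
 * pending writes until a buffer becomes inactive.
 */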
static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);
#if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			ret = i915_gem_object_unbind(obj);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				break;

			/* if waiting caused an object to become inactive,
			 * then loop around and wait for it. Otherwise, we
			 * assume that waiting freed and unbound something,
			 * so there should now be some space in the GTT
			 */
			if (!list_empty(&dev_priv->mm.inactive_list))
				continue;
			break;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, obj->write_domain);

			obj = NULL;
			continue;
		}

		DRM_ERROR("inactive empty %d request empty %d "
			  "flushing empty %d\n",
			  list_empty(&dev_priv->mm.inactive_list),
			  list_empty(&dev_priv->mm.request_list),
			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}
	return ret;
}

static int
i915_gem_evict_everything(struct drm_device *dev)
{
	int ret;

	for (;;) {
		ret = i915_gem_evict_something(dev);
		if (ret != 0)
			break;
	}
	if (ret == -ENOMEM)
		return 0;
	return ret;
}
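
/**
 * Populates the object's page list from its shmem backing store; the pages
 * stay pinned until the list is freed again.
 */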
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->page_list)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->page_list != NULL);
	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
					 DRM_MEM_DRIVER);
	if (obj_priv->page_list == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			DRM_ERROR("read_mapping_page failed: %d\n", ret);
			i915_gem_object_free_page_list(obj);
			return ret;
		}
		obj_priv->page_list[i] = page;
	}
	return 0;
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	int page_count, ret;

	if (alignment == 0)
		alignment = PAGE_SIZE;
	if (alignment & (PAGE_SIZE - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		if (list_empty(&dev_priv->mm.inactive_list) &&
		    list_empty(&dev_priv->mm.flushing_list) &&
		    list_empty(&dev_priv->mm.active_list)) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %d at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	ret = i915_gem_object_get_page_list(obj);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->page_list,
					       page_count,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_free_page_list(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

	return 0;
}
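
/** Flushes the CPU caches for the object's pages, if any are mapped. */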
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->page_list == NULL)
		return;

	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t seqno;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	i915_gem_flush(dev, 0, obj->write_domain);
	seqno = i915_add_request(dev, obj->write_domain);
	obj->write_domain = 0;
	i915_gem_object_move_to_active(obj, seqno);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	obj->write_domain = 0;
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	obj->write_domain = 0;
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	struct drm_device *dev = obj->dev;
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);
		drm_agp_chipset_flush(dev);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains &= I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate a
 * pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
				  uint32_t read_domains,
				  uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;

	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, read_domains,
		 obj->write_domain, write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (write_domain == 0)
		read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain && obj->write_domain != read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |= read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		i915_gem_clflush_object(obj);
	}

	if ((write_domain | flush_domains) != 0)
		obj->write_domain = write_domain;
	obj->read_domains = read_domains;
	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
#if WATCH_BUF
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
#endif
}
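
/*
 * Worked example for the function above (illustrative only, matching
 * Case 3 step 3 from the comment block): a constant buffer currently in
 * (read_domains, write_domain) = (CPU, CPU) submitted for a RENDER read,
 * i.e. set_to_gpu_domain(obj, RENDER, 0):
 *
 *	write_domain == 0, so read_domains |= CPU	-> CPU | RENDER
 *	old write domain (CPU) != new read_domains	-> flush_domains = CPU
 *	RENDER was not previously a read domain		-> invalidate_domains = RENDER
 *
 * The CPU bit in flush_domains triggers i915_gem_clflush_object() here, and
 * the bits accumulated into dev->flush_domains/invalidate_domains later make
 * execbuffer emit an MI_FLUSH and call drm_agp_chipset_flush().
 */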

/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->page_list + i, 1);
		}
		drm_agp_chipset_flush(dev);
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
		 DRM_MEM_DRIVER);
	obj_priv->page_cpu_valid = NULL;
}

/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid. The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
						      DRM_MEM_DRIVER);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->page_list + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	return 0;
}
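
/*
 * Illustrative arithmetic for the range flush above (assuming a 4 KiB
 * PAGE_SIZE): a call with offset = 0x1800 and size = 0x2000 touches
 *
 *	first page: 0x1800 / 0x1000			= 1
 *	last page:  (0x1800 + 0x2000 - 1) / 0x1000	= 3
 *
 * so pages 1..3 are clflushed and marked in page_cpu_valid, while page 0
 * stays invalid until a later range call (or until
 * i915_gem_object_set_to_full_cpu_read_domain() finishes the move).
 */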

/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_relocation_entry __user *relocs;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	relocs = (struct drm_i915_gem_relocation_entry __user *)
		 (uintptr_t) entry->relocs_ptr;
	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			return ret;
		}

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc.target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.read_domains,
				  reloc.write_domain);
			/* Drop the target reference and unpin on this error
			 * path too, as the other checks do, so nothing leaks.
			 */
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);
#endif

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc.offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset &
						    (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc.delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc.offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* Write the updated presumed offset for this entry back out
		 * to the user.
		 */
		reloc.presumed_offset = target_obj_priv->gtt_offset;
		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return ret;
		}

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}
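
/*
 * Sketch of how a single relocation entry is consumed above (illustrative
 * values only): suppose userspace wrote reloc.delta = 0x100 against a target
 * buffer it presumed to live at reloc.presumed_offset = 0x00200000. If the
 * target is actually bound at gtt_offset = 0x00300000, the presumed offset no
 * longer matches, so the dword at obj + reloc.offset is rewritten through the
 * GTT mapping to
 *
 *	reloc_val = target_obj_priv->gtt_offset + reloc.delta = 0x00300100
 *
 * and reloc.presumed_offset is copied back to userspace so that the next
 * execbuffer can take the fast path if nothing has moved.
 */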

/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
					     (uintptr_t) exec->cliprects_ptr;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	if (!exec_start)
		return -EINVAL;

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	uint32_t seqno;

	mutex_lock(&dev->struct_mutex);
	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
	i915_file_priv->mm.last_gem_throttle_seqno =
		i915_file_priv->mm.last_gem_seqno;
	if (seqno)
		ret = i915_wait_request(dev, seqno);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
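
/*
 * Illustrative timing of the throttle above (an explanatory sketch, not
 * driver code): each call waits on the last_gem_seqno that was current at
 * the *previous* throttle call, so a client never runs more than one
 * throttle window ahead of the hardware:
 *
 *	throttle #1: saved seqno == 0	-> no wait, remember seqno A
 *	throttle #2: saved seqno == A	-> wait for A, remember seqno B
 *	throttle #3: saved seqno == B	-> wait for B, ...
 */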

int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	int ret, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains;
	int pin_tries;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
			       DRM_MEM_DRIVER);
	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
				 DRM_MEM_DRIVER);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		return -EIO;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EBUSY;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}
	}

	/* Pin and relocate */
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i]);
			if (ret)
				break;
			pinned = i + 1;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOMEM || pin_tries >= 1) {
			DRM_ERROR("Failed to pin buffers %d\n", ret);
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	batch_obj->pending_write_domain = 0;

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj,
						  obj->pending_read_domains,
						  obj->pending_write_domain);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, dev->flush_domains);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

	exec_offset = exec_list[args->buffer_count - 1].offset;

#if WATCH_EXEC
	i915_gem_dump_object(object_list[args->buffer_count - 1],
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on. We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, flush_domains);
	BUG_ON(seqno == 0);
	i915_file_priv->mm.last_gem_seqno = seqno;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Copy the new buffer offsets back to the user's exec list. */
	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
			   (uintptr_t) args->buffers_ptr,
			   exec_list,
			   sizeof(*exec_list) * args->buffer_count);
	if (ret)
		DRM_ERROR("failed to copy %d exec entries "
			  "back to user (%d)\n",
			  args->buffer_count, ret);
err:
	if (object_list != NULL) {
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);

		for (i = 0; i < args->buffer_count; i++)
			drm_gem_object_unreference(object_list[i]);
	}
	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
		 DRM_MEM_DRIVER);

	return ret;
}
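
/*
 * Rough userspace sketch of driving the ioctl above (an illustrative,
 * untested fragment, not part of the driver; drmIoctl() comes from libdrm
 * and the structures from the i915 uapi header, while the handles, counts
 * and sizes are made up):
 *
 *	struct drm_i915_gem_exec_object objs[2] = {
 *		{ .handle = vbo_handle, .relocation_count = 0,
 *		  .relocs_ptr = 0, .alignment = 0 },
 *		{ .handle = batch_handle, .relocation_count = nrelocs,
 *		  .relocs_ptr = (uintptr_t) relocs, .alignment = 0 },
 *	};
 *	struct drm_i915_gem_execbuffer exec = {
 *		.buffers_ptr = (uintptr_t) objs,
 *		.buffer_count = 2,		// batch buffer must be last
 *		.batch_start_offset = 0,
 *		.batch_len = batch_bytes,	// 8-byte aligned
 *		.num_cliprects = 0,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &exec);
 *
 * On return, objs[i].offset holds the GTT offset each buffer was bound at,
 * which userspace feeds back as presumed_offset in later relocations.
 */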

int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			DRM_ERROR("Failure to bind: %d", ret);
			return ret;
		}
	}
	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}

void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, args->alignment);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	i915_gem_object_unpin(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	/* Don't count being on the flushing list against the object being
	 * done. Otherwise, a buffer left on the flushing list but not getting
	 * flushed (because nobody's flushing that domain) won't ever return
	 * unbusy and get reused by libdrm's bo cache. The other expected
	 * consumer of this interface, OpenGL's occlusion queries, also specs
	 * that the objects get unbusy "eventually" without any interference.
	 */
	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel,
	 * so they've just been written by the CPU with
	 * zeros. They'll need to be clflushed before we
	 * use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	INIT_LIST_HEAD(&obj_priv->list);
	return 0;
}

void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	i915_gem_object_unbind(obj);

	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}

/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	return 0;
}

static int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack! Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
					I915_GEM_DOMAIN_GTT));

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive. If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there. So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
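
/*
 * Note on the wedge detection above (explanatory only): the polling loop
 * samples the hardware seqno every 10 ms and only declares the chip wedged
 * after more than 100 consecutive samples without progress, i.e. roughly
 * one second of a completely stuck ring.
 */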

static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}

static int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	dev_priv->ring.Size = obj->size;
	dev_priv->ring.tail_mask = obj->size - 1;

	dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
	dev_priv->ring.map.size = obj->size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->ring.ring_obj = obj;
	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	i915_kernel_lost_context(dev);

	return 0;
}
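
/*
 * Worked example for the PRB0_CTL value programmed above (illustrative,
 * assuming RING_NR_PAGES masks the ring-length field): with the 128 KiB
 * ring object allocated here,
 *
 *	obj->size - 4096 = 0x20000 - 0x1000 = 0x1F000
 *
 * which encodes a 32-page (128 KiB) ring in the length field (the field
 * holds "pages - 1" in bits 12 and up), OR'd with RING_NO_REPORT and
 * RING_VALID to enable the ring.
 */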

static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	if (dev_priv->hws_obj != NULL) {
		struct drm_gem_object *obj = dev_priv->hws_obj;
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		kunmap(obj_priv->page_list[0]);
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		dev_priv->hws_obj = NULL;
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		dev_priv->hw_status_page = NULL;

		/* Write high address into HWS_PGA when disabling. */
		I915_WRITE(HWS_PGA, 0x1ffff000);
	}
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0)
		return ret;

	dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
							dev->agp->agp_info.aper_size
							* 1024 * 1024);

	mutex_lock(&dev->struct_mutex);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	dev_priv->mm.suspended = 0;
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	return ret;
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	i915_gem_detect_bit_6_swizzle(dev);
}