i915_gem.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
                           uint32_t read_domains,
                           uint32_t write_domain);
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
                                 uint64_t offset,
                                 uint64_t size,
                                 uint32_t read_domains,
                                 uint32_t write_domain);
static int
i915_gem_set_domain(struct drm_gem_object *obj,
                    struct drm_file *file_priv,
                    uint32_t read_domains,
                    uint32_t write_domain);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_init *args = data;

        mutex_lock(&dev->struct_mutex);

        if (args->gtt_start >= args->gtt_end ||
            (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
            (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
                    args->gtt_end - args->gtt_start);

        dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
        struct drm_i915_gem_object *obj_priv;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        args->aper_size = dev->gtt_total;
        args->aper_available_size = args->aper_size;

        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
                if (obj_priv->pin_count > 0)
                        args->aper_available_size -= obj_priv->obj->size;
        }

        return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
        int handle, ret;

        args->size = roundup(args->size, PAGE_SIZE);

        /* Allocate the new object */
        obj = drm_gem_object_alloc(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_handle_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        ssize_t read;
        loff_t offset;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check source.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
                                               I915_GEM_DOMAIN_CPU, 0);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        offset = args->offset;

        read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
                        args->size, &offset);
        if (read != args->size) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                if (read < 0)
                        return read;
                else
                        return -EINVAL;
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        char *vaddr_atomic;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        if (unwritten)
                return -EFAULT;
        return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        char __iomem *vaddr;
        unsigned long unwritten;

        vaddr = io_mapping_map_wc(mapping, page_base);
        if (vaddr == NULL)
                return -EFAULT;
        unwritten = __copy_from_user(vaddr + page_offset,
                                     user_data, length);
        io_mapping_unmap(vaddr);
        if (unwritten)
                return -EFAULT;
        return 0;
}
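
/* A note on the two paths above: the atomic write-combining mapping used
 * by fast_user_write() runs with page faults disabled, so if the source
 * user page is not resident the copy returns a nonzero "unwritten" count
 * instead of sleeping. i915_gem_gtt_pwrite() below therefore tries the
 * fast path first and falls back to slow_user_write(), which may sleep
 * to fault the page in.
 */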
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                    struct drm_i915_gem_pwrite *args,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
        if (!access_ok(VERIFY_READ, user_data, remain))
                return -EFAULT;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
        ret = i915_gem_set_domain(obj, file_priv,
                                  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
        if (ret)
                goto fail;

        obj_priv = obj->driver_private;
        offset = obj_priv->gtt_offset + args->offset;
        obj_priv->dirty = 1;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
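                /* Hypothetical example: with 4 KiB pages, offset 0x1ff0
                 * and remain 0x40 give page_base 0x1000, page_offset 0xff0
                 * and page_length 0x10, so no single copy ever crosses a
                 * page boundary of the aperture mapping.
                 */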
                ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                                      page_offset, user_data, page_length);

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. In this case, use the
                 * non-atomic function
                 */
                if (ret) {
                        ret = slow_user_write(dev_priv->mm.gtt_mapping,
                                              page_base, page_offset,
                                              user_data, page_length);
                        if (ret)
                                goto fail;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail:
        i915_gem_object_unpin(obj);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file_priv)
{
        int ret;
        loff_t offset;
        ssize_t written;

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_set_domain(obj, file_priv,
                                  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        offset = args->offset;

        written = vfs_write(obj->filp,
                            (char __user *)(uintptr_t) args->data_ptr,
                            args->size, &offset);
        if (written != args->size) {
                mutex_unlock(&dev->struct_mutex);
                if (written < 0)
                        return written;
                else
                        return -EINVAL;
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check destination.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->tiling_mode == I915_TILING_NONE &&
            dev->gtt_total != 0)
                ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
        else
                ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

#if WATCH_PWRITE
        if (ret)
                DRM_INFO("pwrite failed %d\n", ret);
#endif

        drm_gem_object_unreference(obj);

        return ret;
}
/**
 * Called when user space prepares to use an object
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
        DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
                 obj, obj->size, args->read_domains, args->write_domain);
#endif
        ret = i915_gem_set_domain(obj, file_priv,
                                  args->read_domains, args->write_domain);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -EBADF;
        }

#if WATCH_BUF
        DRM_INFO("%s: sw_finish %d (%p %d)\n",
                 __func__, args->handle, obj, obj->size);
#endif
        obj_priv = obj->driver_private;

        /* Pinned buffers may be scanout, so flush the cache */
        if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
                i915_gem_clflush_object(obj);
                drm_agp_chipset_flush(dev);
        }
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        loff_t offset;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        offset = args->offset;

        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        up_write(&current->mm->mmap_sem);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR((void *)addr))
                return addr;

        args->addr_ptr = (uint64_t) addr;

        return 0;
}
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count = obj->size / PAGE_SIZE;
        int i;

        if (obj_priv->page_list == NULL)
                return;
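
        /* Pages the GPU may have written are marked dirty below so the VM
         * knows to write them back; GPU writes through the GTT bypass the
         * CPU's normal dirty tracking.
         */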
        for (i = 0; i < page_count; i++)
                if (obj_priv->page_list[i] != NULL) {
                        if (obj_priv->dirty)
                                set_page_dirty(obj_priv->page_list[i]);
                        mark_page_accessed(obj_priv->page_list[i]);
                        page_cache_release(obj_priv->page_list[i]);
                }
        obj_priv->dirty = 0;

        drm_free(obj_priv->page_list,
                 page_count * sizeof(struct page *),
                 DRM_MEM_DRIVER);
        obj_priv->page_list = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        /* Add a reference if we're newly entering the active list. */
        if (!obj_priv->active) {
                drm_gem_object_reference(obj);
                obj_priv->active = 1;
        }
        /* Move from whatever list we were on to the tail of execution. */
        list_move_tail(&obj_priv->list,
                       &dev_priv->mm.active_list);
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
                list_del_init(&obj_priv->list);
        else
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

        if (obj_priv->active) {
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);
        }
        i915_verify_inactive(dev, __FILE__, __LINE__);
}
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *request;
        uint32_t seqno;
        int was_empty;
        RING_LOCALS;

        request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
        if (request == NULL)
                return 0;

        /* Grab the seqno we're going to make this request be, and bump the
         * next (skipping 0 so it can be the reserved no-seqno value).
         */
        seqno = dev_priv->mm.next_gem_seqno;
        dev_priv->mm.next_gem_seqno++;
        if (dev_priv->mm.next_gem_seqno == 0)
                dev_priv->mm.next_gem_seqno++;

        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(seqno);

        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();
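
        /* The four dwords above make the GPU write the new seqno into the
         * hardware status page at I915_GEM_HWS_INDEX (where
         * i915_get_gem_seqno() reads it back) and then raise the user
         * interrupt that wakes anyone sleeping in i915_wait_request().
         */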
        DRM_DEBUG("%d\n", seqno);

        request->seqno = seqno;
        request->emitted_jiffies = jiffies;
        request->flush_domains = flush_domains;
        was_empty = list_empty(&dev_priv->mm.request_list);
        list_add_tail(&request->list, &dev_priv->mm.request_list);

        if (was_empty && !dev_priv->mm.suspended)
                schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
        return seqno;
}
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;
        RING_LOCALS;

        /* The sampler always gets flushed on i965 (sigh) */
        if (IS_I965G(dev))
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
        BEGIN_LP_RING(2);
        OUT_RING(cmd);
        OUT_RING(0); /* noop */
        ADVANCE_LP_RING();
        return flush_domains;
}
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
                        struct drm_i915_gem_request *request)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Move any buffers on the active list that are no longer referenced
         * by the ringbuffer to the flushing/inactive lists as appropriate.
         */
        while (!list_empty(&dev_priv->mm.active_list)) {
                struct drm_gem_object *obj;
                struct drm_i915_gem_object *obj_priv;

                obj_priv = list_first_entry(&dev_priv->mm.active_list,
                                            struct drm_i915_gem_object,
                                            list);
                obj = obj_priv->obj;

                /* If the seqno being retired doesn't match the oldest in the
                 * list, then the oldest in the list must still be newer than
                 * this seqno.
                 */
                if (obj_priv->last_rendering_seqno != request->seqno)
                        return;
#if WATCH_LRU
                DRM_INFO("%s: retire %d moves to inactive list %p\n",
                         __func__, request->seqno, obj);
#endif

                if (obj->write_domain != 0) {
                        list_move_tail(&obj_priv->list,
                                       &dev_priv->mm.flushing_list);
                } else {
                        i915_gem_object_move_to_inactive(obj);
                }
        }

        if (request->flush_domains != 0) {
                struct drm_i915_gem_object *obj_priv, *next;

                /* Clear the write domain and activity from any buffers
                 * that are just waiting for a flush matching the one retired.
                 */
                list_for_each_entry_safe(obj_priv, next,
                                         &dev_priv->mm.flushing_list, list) {
                        struct drm_gem_object *obj = obj_priv->obj;

                        if (obj->write_domain & request->flush_domains) {
                                obj->write_domain = 0;
                                i915_gem_object_move_to_inactive(obj);
                        }
                }
        }
}
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}
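
/* Note that the signed comparison above is wraparound-safe: for example,
 * i915_seqno_passed(2, 0xffffffff) computes 2 - 0xffffffff == 3 as a
 * uint32_t, which is positive as an int32_t, so seqno 2 is correctly
 * treated as later once the 32-bit counter has wrapped. The two seqnos
 * only need to stay within 2^31 of each other.
 */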
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;

        seqno = i915_get_gem_seqno(dev);

        while (!list_empty(&dev_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;
                uint32_t retiring_seqno;

                request = list_first_entry(&dev_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           list);
                retiring_seqno = request->seqno;

                if (i915_seqno_passed(seqno, retiring_seqno) ||
                    dev_priv->mm.wedged) {
                        i915_gem_retire_request(dev, request);

                        list_del(&request->list);
                        drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
                } else
                        break;
        }
}
void
i915_gem_retire_work_handler(struct work_struct *work)
{
        drm_i915_private_t *dev_priv;
        struct drm_device *dev;

        dev_priv = container_of(work, drm_i915_private_t,
                                mm.retire_work.work);
        dev = dev_priv->dev;

        mutex_lock(&dev->struct_mutex);
        i915_gem_retire_requests(dev);
        if (!dev_priv->mm.suspended &&
            !list_empty(&dev_priv->mm.request_list))
                schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = 0;

        BUG_ON(seqno == 0);
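
        /* i915_user_irq_get() below keeps MI_USER_INTERRUPT delivery
         * enabled while we sleep; the interrupt handler wakes
         * dev_priv->irq_queue whenever a request completes, and the wait
         * condition rechecks the status-page seqno each time.
         */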
        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
                dev_priv->mm.waiting_gem_seqno = seqno;
                i915_user_irq_get(dev);
                ret = wait_event_interruptible(dev_priv->irq_queue,
                                               i915_seqno_passed(i915_get_gem_seqno(dev),
                                                                 seqno) ||
                                               dev_priv->mm.wedged);
                i915_user_irq_put(dev);
                dev_priv->mm.waiting_gem_seqno = 0;
        }
        if (dev_priv->mm.wedged)
                ret = -EIO;

        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
                          __func__, ret, seqno, i915_get_gem_seqno(dev));

        /* Directly dispatch request retiring. While we have the work queue
         * to handle this, the waiter on a request often wants an associated
         * buffer to have made it to the inactive list, and we would need
         * a separate wait queue to handle that.
         */
        if (ret == 0)
                i915_gem_retire_requests(dev);

        return ret;
}
static void
i915_gem_flush(struct drm_device *dev,
               uint32_t invalidate_domains,
               uint32_t flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd;
        RING_LOCALS;

#if WATCH_EXEC
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                 invalidate_domains, flush_domains);
#endif

        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);

        if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
                                                     I915_GEM_DOMAIN_GTT)) {
                /*
                 * read/write caches:
                 *
                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
                 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
                 * also flushed at 2d versus 3d pipeline switches.
                 *
                 * read-only caches:
                 *
                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
                 * MI_READ_FLUSH is set, and is always flushed on 965.
                 *
                 * I915_GEM_DOMAIN_COMMAND may not exist?
                 *
                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
                 * invalidated when MI_EXE_FLUSH is set.
                 *
                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
                 * invalidated with every MI_FLUSH.
                 *
                 * TLBs:
                 *
                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
                 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
                 * are flushed at any MI_FLUSH.
                 */

                cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
                if ((invalidate_domains|flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
                if (!IS_I965G(dev)) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
                         */
                        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                                cmd |= MI_READ_FLUSH;
                }
                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                        cmd |= MI_EXE_FLUSH;
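
                /* Hypothetical example: invalidating just
                 * I915_GEM_DOMAIN_RENDER clears MI_NO_WRITE_FLUSH, leaving
                 * cmd == MI_FLUSH; also invalidating SAMPLER on pre-965
                 * sets MI_READ_FLUSH as well.
                 */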
#if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
                BEGIN_LP_RING(2);
                OUT_RING(cmd);
                OUT_RING(0); /* noop */
                ADVANCE_LP_RING();
        }
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret;

        /* If there are writes queued to the buffer, flush and
         * create a new seqno to wait for.
         */
        if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
                uint32_t write_domain = obj->write_domain;
#if WATCH_BUF
                DRM_INFO("%s: flushing object %p from write domain %08x\n",
                         __func__, obj, write_domain);
#endif
                i915_gem_flush(dev, 0, write_domain);

                i915_gem_object_move_to_active(obj);
                obj_priv->last_rendering_seqno = i915_add_request(dev,
                                                                  write_domain);
                BUG_ON(obj_priv->last_rendering_seqno == 0);
#if WATCH_LRU
                DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
#endif
        }

        /* If there is rendering queued on the buffer being evicted, wait for
         * it.
         */
        if (obj_priv->active) {
#if WATCH_BUF
                DRM_INFO("%s: object %p wait for seqno %08x\n",
                         __func__, obj, obj_priv->last_rendering_seqno);
#endif
                ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
                if (ret != 0)
                        return ret;
        }

        return 0;
}
/**
 * Unbinds an object from the GTT aperture.
 */
static int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret = 0;

#if WATCH_BUF
        DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
        DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
        if (obj_priv->gtt_space == NULL)
                return 0;

        if (obj_priv->pin_count != 0) {
                DRM_ERROR("Attempting to unbind pinned buffer\n");
                return -EINVAL;
        }

        /* Wait for any rendering to complete
         */
        ret = i915_gem_object_wait_rendering(obj);
        if (ret) {
                DRM_ERROR("wait_rendering failed: %d\n", ret);
                return ret;
        }

        /* Move the object to the CPU domain to ensure that
         * any possible CPU writes while it's not in the GTT
         * are flushed when we go to remap it. This will
         * also ensure that all pending GPU writes are finished
         * before we unbind.
         */
        ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
                                         I915_GEM_DOMAIN_CPU);
        if (ret) {
                DRM_ERROR("set_domain failed: %d\n", ret);
                return ret;
        }

        if (obj_priv->agp_mem != NULL) {
                drm_unbind_agp(obj_priv->agp_mem);
                drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
                obj_priv->agp_mem = NULL;
        }

        BUG_ON(obj_priv->active);

        i915_gem_object_free_page_list(obj);

        if (obj_priv->gtt_space) {
                atomic_dec(&dev->gtt_count);
                atomic_sub(obj->size, &dev->gtt_memory);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
        }

        /* Remove ourselves from the LRU list if present. */
        if (!list_empty(&obj_priv->list))
                list_del_init(&obj_priv->list);

        return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;
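
        /* Eviction tries three things in order on each pass: unbind an
         * inactive buffer if one exists; otherwise wait on the oldest
         * outstanding request, hoping retirement inactivates something;
         * otherwise flush the flushing list so the next pass has a
         * request to wait on.
         */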
        for (;;) {
                /* If there's an inactive buffer available now, grab it
                 * and be done.
                 */
                if (!list_empty(&dev_priv->mm.inactive_list)) {
                        obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
                                                    struct drm_i915_gem_object,
                                                    list);
                        obj = obj_priv->obj;
                        BUG_ON(obj_priv->pin_count != 0);
#if WATCH_LRU
                        DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
                        BUG_ON(obj_priv->active);

                        /* Wait on the rendering and unbind the buffer. */
                        ret = i915_gem_object_unbind(obj);
                        break;
                }

                /* If we didn't get anything, but the ring is still processing
                 * things, wait for one of those things to finish and hopefully
                 * leave us a buffer to evict.
                 */
                if (!list_empty(&dev_priv->mm.request_list)) {
                        struct drm_i915_gem_request *request;

                        request = list_first_entry(&dev_priv->mm.request_list,
                                                   struct drm_i915_gem_request,
                                                   list);

                        ret = i915_wait_request(dev, request->seqno);
                        if (ret)
                                break;

                        /* if waiting caused an object to become inactive,
                         * then loop around and wait for it. Otherwise, we
                         * assume that waiting freed and unbound something,
                         * so there should now be some space in the GTT
                         */
                        if (!list_empty(&dev_priv->mm.inactive_list))
                                continue;
                        break;
                }

                /* If we didn't have anything on the request list but there
                 * are buffers awaiting a flush, emit one and try again.
                 * When we wait on it, those buffers waiting for that flush
                 * will get moved to inactive.
                 */
                if (!list_empty(&dev_priv->mm.flushing_list)) {
                        obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
                                                    struct drm_i915_gem_object,
                                                    list);
                        obj = obj_priv->obj;

                        i915_gem_flush(dev,
                                       obj->write_domain,
                                       obj->write_domain);
                        i915_add_request(dev, obj->write_domain);

                        obj = NULL;
                        continue;
                }

                DRM_ERROR("inactive empty %d request empty %d "
                          "flushing empty %d\n",
                          list_empty(&dev_priv->mm.inactive_list),
                          list_empty(&dev_priv->mm.request_list),
                          list_empty(&dev_priv->mm.flushing_list));
                /* If we didn't do any of the above, there's nothing to be done
                 * and we just can't fit it in.
                 */
                return -ENOMEM;
        }
        return ret;
}
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count, i;
        struct address_space *mapping;
        struct inode *inode;
        struct page *page;
        int ret;

        if (obj_priv->page_list)
                return 0;

        /* Get the list of pages out of our struct file. They'll be pinned
         * at this point until we release them.
         */
        page_count = obj->size / PAGE_SIZE;
        BUG_ON(obj_priv->page_list != NULL);
        obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
                                         DRM_MEM_DRIVER);
        if (obj_priv->page_list == NULL) {
                DRM_ERROR("Failed to allocate page list\n");
                return -ENOMEM;
        }

        inode = obj->filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
                page = read_mapping_page(mapping, i, NULL);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        DRM_ERROR("read_mapping_page failed: %d\n", ret);
                        i915_gem_object_free_page_list(obj);
                        return ret;
                }
                obj_priv->page_list[i] = page;
        }
        return 0;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_mm_node *free_space;
        int page_count, ret;

        if (alignment == 0)
                alignment = PAGE_SIZE;
        if (alignment & (PAGE_SIZE - 1)) {
                DRM_ERROR("Invalid object alignment requested %u\n", alignment);
                return -EINVAL;
        }
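
        /* Binding alternates between drm_mm allocation and eviction: each
         * failed search below evicts one buffer and jumps back to
         * search_free until the object fits or nothing evictable remains.
         */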
 search_free:
        free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                        obj->size, alignment, 0);
        if (free_space != NULL) {
                obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
                                                       alignment);
                if (obj_priv->gtt_space != NULL) {
                        obj_priv->gtt_space->private = obj;
                        obj_priv->gtt_offset = obj_priv->gtt_space->start;
                }
        }
        if (obj_priv->gtt_space == NULL) {
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
#if WATCH_LRU
                DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
                if (list_empty(&dev_priv->mm.inactive_list) &&
                    list_empty(&dev_priv->mm.flushing_list) &&
                    list_empty(&dev_priv->mm.active_list)) {
                        DRM_ERROR("GTT full, but LRU list empty\n");
                        return -ENOMEM;
                }

                ret = i915_gem_evict_something(dev);
                if (ret != 0) {
                        DRM_ERROR("Failed to evict a buffer %d\n", ret);
                        return ret;
                }
                goto search_free;
        }

#if WATCH_BUF
        DRM_INFO("Binding object of size %d at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);
#endif
        ret = i915_gem_object_get_page_list(obj);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
                return ret;
        }

        page_count = obj->size / PAGE_SIZE;
        /* Create an AGP memory structure pointing at our pages, and bind it
         * into the GTT.
         */
        obj_priv->agp_mem = drm_agp_bind_pages(dev,
                                               obj_priv->page_list,
                                               page_count,
                                               obj_priv->gtt_offset,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
                i915_gem_object_free_page_list(obj);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
                return -ENOMEM;
        }
        atomic_inc(&dev->gtt_count);
        atomic_add(obj->size, &dev->gtt_memory);

        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
         */
        BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
        BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

        return 0;
}
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        /* If we don't have a page list set up, then we're not pinned
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
        if (obj_priv->page_list == NULL)
                return;

        drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate a
 * pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Mapped to GTT
 *      4. Read by GPU
 *      5. Unmapped from GTT
 *      6. Freed
 *
 *      Let's take these a step at a time
 *
 *      1. Allocated
 *              Pages allocated from the kernel may still have
 *              cache contents, so we set them to (CPU, CPU) always.
 *      2. Written by CPU (using pwrite)
 *              The pwrite function calls set_domain (CPU, CPU) and
 *              this function does nothing (as nothing changes)
 *      3. Mapped by GTT
 *              This function asserts that the object is not
 *              currently in any GPU-based read or write domains
 *      4. Read by GPU
 *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *              As write_domain is zero, this function adds in the
 *              current read domains (CPU+COMMAND, 0).
 *              flush_domains is set to CPU.
 *              invalidate_domains is set to COMMAND
 *              clflush is run to get data out of the CPU caches
 *              then i915_dev_set_domain calls i915_gem_flush to
 *              emit an MI_FLUSH and drm_agp_chipset_flush
 *      5. Unmapped from GTT
 *              i915_gem_object_unbind calls set_domain (CPU, CPU)
 *              flush_domains and invalidate_domains end up both zero
 *              so no flushing/invalidating happens
 *      6. Freed
 *              yay, done
 *
 * Case 2: The shared render buffer
 *
 *      1. Allocated
 *      2. Mapped to GTT
 *      3. Read/written by GPU
 *      4. set_domain to (CPU,CPU)
 *      5. Read/written by CPU
 *      6. Read/written by GPU
 *
 *      1. Allocated
 *              Same as last example, (CPU, CPU)
 *      2. Mapped to GTT
 *              Nothing changes (assertions find that it is not in the GPU)
 *      3. Read/written by GPU
 *              execbuffer calls set_domain (RENDER, RENDER)
 *              flush_domains gets CPU
 *              invalidate_domains gets GPU
 *              clflush (obj)
 *              MI_FLUSH and drm_agp_chipset_flush
 *      4. set_domain (CPU, CPU)
 *              flush_domains gets GPU
 *              invalidate_domains gets CPU
 *              wait_rendering (obj) to make sure all drawing is complete.
 *              This will include an MI_FLUSH to get the data from GPU
 *              to memory
 *              clflush (obj) to invalidate the CPU cache
 *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *      5. Read/written by CPU
 *              cache lines are loaded and dirtied
 *      6. Read/written by GPU
 *              Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Read by GPU
 *      4. Updated (written) by CPU again
 *      5. Read by GPU
 *
 *      1. Allocated
 *              (CPU, CPU)
 *      2. Written by CPU
 *              (CPU, CPU)
 *      3. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 *      4. Updated (written) by CPU again
 *              (CPU, CPU)
 *              flush_domains = 0 (no previous write domain)
 *              invalidate_domains = 0 (no new read domains)
 *      5. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 */
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
                           uint32_t read_domains,
                           uint32_t write_domain)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        uint32_t invalidate_domains = 0;
        uint32_t flush_domains = 0;
        int ret;

#if WATCH_BUF
        DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
                 __func__, obj,
                 obj->read_domains, read_domains,
                 obj->write_domain, write_domain);
#endif
        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         */
        if (write_domain == 0)
                read_domains |= obj->read_domains;
        else
                obj_priv->dirty = 1;

        /*
         * Flush the current write domain if
         * the new read domains don't match. Invalidate
         * any read domains which differ from the old
         * write domain
         */
        if (obj->write_domain && obj->write_domain != read_domains) {
                flush_domains |= obj->write_domain;
                invalidate_domains |= read_domains & ~obj->write_domain;
        }
        /*
         * Invalidate any read caches which may have
         * stale data. That is, any new read domains.
         */
        invalidate_domains |= read_domains & ~obj->read_domains;
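
        /* Hypothetical example: moving a buffer from (CPU, CPU) to
         * (RENDER, 0) leaves flush_domains == CPU (the old write domain)
         * and invalidate_domains == RENDER (the new read domain), which
         * triggers the clflush below and, later, an MI_FLUSH from
         * i915_gem_dev_set_domain().
         */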
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
                DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
                         __func__, flush_domains, invalidate_domains);
#endif
                /*
                 * If we're invalidating the CPU cache and flushing a GPU cache,
                 * then pause for rendering so that the GPU caches will be
                 * flushed before the cpu cache is invalidated
                 */
                if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
                    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
                                       I915_GEM_DOMAIN_GTT))) {
                        ret = i915_gem_object_wait_rendering(obj);
                        if (ret)
                                return ret;
                }
                i915_gem_clflush_object(obj);
        }

        if ((write_domain | flush_domains) != 0)
                obj->write_domain = write_domain;

        /* If we're invalidating the CPU domain, clear the per-page CPU
         * domain list as well.
         */
        if (obj_priv->page_cpu_valid != NULL &&
            (write_domain != 0 ||
             read_domains & I915_GEM_DOMAIN_CPU)) {
                drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
                         DRM_MEM_DRIVER);
                obj_priv->page_cpu_valid = NULL;
        }
        obj->read_domains = read_domains;

        dev->invalidate_domains |= invalidate_domains;
        dev->flush_domains |= flush_domains;
#if WATCH_BUF
        DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
                 __func__,
                 obj->read_domains, obj->write_domain,
                 dev->invalidate_domains, dev->flush_domains);
#endif
        return 0;
}
/**
 * Set the read/write domain on a range of the object.
 *
 * Currently only implemented for CPU reads, otherwise drops to normal
 * i915_gem_object_set_domain().
 */
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
                                 uint64_t offset,
                                 uint64_t size,
                                 uint32_t read_domains,
                                 uint32_t write_domain)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret, i;

        if (obj->read_domains & I915_GEM_DOMAIN_CPU)
                return 0;

        if (read_domains != I915_GEM_DOMAIN_CPU ||
            write_domain != 0)
                return i915_gem_object_set_domain(obj,
                                                  read_domains, write_domain);

        /* Wait on any GPU rendering to the object to be flushed. */
        if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
                ret = i915_gem_object_wait_rendering(obj);
                if (ret)
                        return ret;
        }

        if (obj_priv->page_cpu_valid == NULL) {
                obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
                                                      DRM_MEM_DRIVER);
        }
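
        /* page_cpu_valid holds one byte per page recording whether that
         * page has already been clflushed for CPU reads, so repeated
         * preads of the same range only flush pages not yet visited.
         */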
        /* Flush the cache on any pages that are still invalid from the CPU's
         * perspective.
         */
        for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
                if (obj_priv->page_cpu_valid[i])
                        continue;

                drm_clflush_pages(obj_priv->page_list + i, 1);

                obj_priv->page_cpu_valid[i] = 1;
        }

        return 0;
}
/**
 * Once all of the objects have been set in the proper domain,
 * perform the necessary flush and invalidate operations.
 *
 * Returns the write domains flushed, for use in flush tracking.
 */
static uint32_t
i915_gem_dev_set_domain(struct drm_device *dev)
{
        uint32_t flush_domains = dev->flush_domains;

        /*
         * Now that all the buffers are synced to the proper domains,
         * flush and invalidate the collected domains
         */
        if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
                         __func__,
                         dev->invalidate_domains,
                         dev->flush_domains);
#endif
                i915_gem_flush(dev,
                               dev->invalidate_domains,
                               dev->flush_domains);
                dev->invalidate_domains = 0;
                dev->flush_domains = 0;
        }

        return flush_domains;
}
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                 struct drm_file *file_priv,
                                 struct drm_i915_gem_exec_object *entry)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_relocation_entry reloc;
        struct drm_i915_gem_relocation_entry __user *relocs;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int i, ret;
        void __iomem *reloc_page;

        /* Choose the GTT offset for our buffer and put it there. */
        ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
        if (ret)
                return ret;

        entry->offset = obj_priv->gtt_offset;

        relocs = (struct drm_i915_gem_relocation_entry __user *)
                 (uintptr_t) entry->relocs_ptr;
        /* Apply the relocations, using the GTT aperture to avoid cache
         * flushing requirements.
         */
  1346. for (i = 0; i < entry->relocation_count; i++) {
  1347. struct drm_gem_object *target_obj;
  1348. struct drm_i915_gem_object *target_obj_priv;
  1349. uint32_t reloc_val, reloc_offset;
  1350. uint32_t __iomem *reloc_entry;
		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			/* copy_from_user() returns the number of bytes not
			 * copied, not an errno; report the fault explicitly.
			 */
			return -EFAULT;
		}
		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc.target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);
#endif

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		/* Now that we're going to actually write some data in,
		 * make sure that any rendering using this buffer's contents
		 * is completed.
		 */
		i915_gem_object_wait_rendering(obj);
		/* As we're writing through the gtt, flush
		 * any CPU writes before we write the relocations
		 */
		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			drm_agp_chipset_flush(dev);
			obj->write_domain = 0;
		}
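
		/* The dword to patch lives at GTT address
		 * (obj_priv->gtt_offset + reloc.offset).  Map just the
		 * aperture page containing it (write-combined, atomic
		 * kmap) and poke the target object's GTT offset plus
		 * reloc.delta into it.
		 */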
		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc.offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc.delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc.offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* Write the updated presumed offset for this entry back out
		 * to the user.
		 */
		reloc.presumed_offset = target_obj_priv->gtt_offset;
		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			/* Again, a non-zero return here is a byte count,
			 * not an error code.
			 */
			return -EFAULT;
		}
		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}

/** Dispatch a batchbuffer to the ring. */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
					     (uintptr_t) exec->cliprects_ptr;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	if (!exec_start)
		return -EINVAL;

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}
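
		/* 830/845-class hardware takes an inline MI_BATCH_BUFFER
		 * command carrying explicit start and end addresses;
		 * everything else chains to the batch with
		 * MI_BATCH_BUFFER_START, with the non-secure bit encoded
		 * differently on 965-class chips.
		 */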
		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	uint32_t seqno;

	mutex_lock(&dev->struct_mutex);
	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
	i915_file_priv->mm.last_gem_throttle_seqno =
		i915_file_priv->mm.last_gem_seqno;
	if (seqno)
		ret = i915_wait_request(dev, seqno);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
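
/**
 * Execbuffer ioctl: look up, pin and relocate the caller's buffer list,
 * flush/invalidate the collected domains, dispatch the final buffer in the
 * list as the batch, and tag every object with the new request's seqno.
 */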
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	int ret, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
			       DRM_MEM_DRIVER);
	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
				 DRM_MEM_DRIVER);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		/* Convert the leftover-byte count into a real errno. */
		ret = -EFAULT;
		goto pre_mutex_err;
	}
	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		/* Free the lists allocated above instead of leaking them. */
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}
	/* Zero the global flush/invalidate flags. These
	 * will be modified as each object is bound to the
	 * gtt.
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;
	/* Look up object handles and perform the relocations */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}

		object_list[i]->pending_read_domains = 0;
		object_list[i]->pending_write_domain = 0;
		ret = i915_gem_object_pin_and_relocate(object_list[i],
						       file_priv,
						       &exec_list[i]);
		if (ret) {
			DRM_ERROR("object bind and relocate failed %d\n", ret);
			goto err;
		}
		pinned = i + 1;
	}
	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	batch_obj->pending_write_domain = 0;

	i915_verify_inactive(dev, __FILE__, __LINE__);

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		struct drm_i915_gem_object *obj_priv = obj->driver_private;
		if (obj_priv->gtt_space == NULL) {
			/* We evicted the buffer while validating our set of
			 * buffers.  We could try to recover by kicking
			 * everything back out and trying again from the
			 * start.
			 */
			ret = -ENOMEM;
			goto err;
		}
		/* make sure all previous memory operations have passed */
		ret = i915_gem_object_set_domain(obj,
						 obj->pending_read_domains,
						 obj->pending_write_domain);
		if (ret)
			goto err;
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Flush/invalidate caches and chipset buffer */
	flush_domains = i915_gem_dev_set_domain(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

	exec_offset = exec_list[args->buffer_count - 1].offset;

#if WATCH_EXEC
	i915_gem_dump_object(object_list[args->buffer_count - 1],
			     args->batch_len,
			     __func__,
			     ~0);
#endif
	(void)i915_add_request(dev, flush_domains);

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, flush_domains);
	BUG_ON(seqno == 0);
	i915_file_priv->mm.last_gem_seqno = seqno;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		i915_gem_object_move_to_active(obj);
		obj_priv->last_rendering_seqno = seqno;
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);
	/* Copy the new buffer offsets back to the user's exec list. */
	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
			   (uintptr_t) args->buffers_ptr,
			   exec_list,
			   sizeof(*exec_list) * args->buffer_count);
	if (ret) {
		DRM_ERROR("failed to copy %d exec entries "
			  "back to user (%d)\n",
			  args->buffer_count, ret);
		/* Report the partial copy as a fault rather than returning
		 * the positive byte count.
		 */
		ret = -EFAULT;
	}
err:
	if (object_list != NULL) {
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);

		for (i = 0; i < args->buffer_count; i++)
			drm_gem_object_unreference(object_list[i]);
	}
	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
		 DRM_MEM_DRIVER);

	return ret;
}
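
/**
 * Pin an object into the GTT, binding it to a GTT range first if it does not
 * already have one, and pull it off the inactive list while it is pinned.
 */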
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			DRM_ERROR("Failure to bind: %d\n", ret);
			return ret;
		}
	}
	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
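
/**
 * Drop a pin reference on an object; once the last pin goes away, put the
 * object back on the inactive list if it is neither active nor awaiting a
 * GPU flush.
 */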
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
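
/**
 * Pin ioctl: pin the named object into the GTT on behalf of userspace and
 * return its GTT offset.  The CPU cache is flushed here as well, since the
 * X server does not manage domains for pinned objects yet.
 */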
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, args->alignment);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
		i915_gem_clflush_object(obj);
		drm_agp_chipset_flush(dev);
		obj->write_domain = 0;
	}
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	i915_gem_object_unpin(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
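
/**
 * Busy ioctl: report whether the named object still has rendering
 * outstanding (its active flag is set).
 */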
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	args->busy = obj_priv->active;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
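
/**
 * Allocate the per-object driver private state and start the object out in
 * the CPU domain, since freshly allocated pages will have been zeroed by
 * the CPU.
 */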
int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel, so they've just been
	 * written by the CPU with zeros.  They'll need to be clflushed
	 * before we use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	INIT_LIST_HEAD(&obj_priv->list);
	return 0;
}
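
/**
 * Tear down an object: drop any remaining pins, unbind it from the GTT, and
 * free the per-object driver state.
 */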
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	i915_gem_object_unbind(obj);

	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
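
/**
 * Move an object into the requested read/write domains, then perform the
 * device-wide flush/invalidate and queue a request for any GPU flush that
 * was emitted.
 */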
static int
i915_gem_set_domain(struct drm_gem_object *obj,
		    struct drm_file *file_priv,
		    uint32_t read_domains,
		    uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;
	int ret;
	uint32_t flush_domains;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
	if (ret)
		return ret;
	flush_domains = i915_gem_dev_set_domain(obj->dev);

	if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
		(void) i915_add_request(dev, flush_domains);

	return 0;
}

/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			/* The caller holds struct_mutex and unlocks on
			 * error, so don't drop it here as well.
			 */
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			return ret;
		}
	}

	return 0;
}
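
/**
 * Quiesce the GPU: stop accepting new execbuffers, flush outstanding
 * rendering, wait for the resulting seqno to retire, evict everything from
 * the GTT and tear down the ring.
 */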
static int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
					I915_GEM_DOMAIN_GTT));

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	/* Active and flushing should now be empty as we've
	 * waited for a sequence higher than any pending execbuffer
	 */
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	/* Request should now be empty as we've also waited
	 * for the last request in the list
	 */
	BUG_ON(!list_empty(&dev_priv->mm.request_list));

	/* Move all buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));

	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
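
/**
 * Set up the hardware status page for chipsets that keep it in graphics
 * memory: allocate a GEM object, pin it, kmap it and point HWS_PGA at its
 * GTT address.
 */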
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
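
/**
 * Allocate and pin the ring buffer object, map it through the GTT aperture,
 * and program the ring registers, retrying the head reset that G45 parts
 * are known to miss.
 */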
static int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	dev_priv->ring.Size = obj->size;
	dev_priv->ring.tail_mask = obj->size - 1;

	dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
	dev_priv->ring.map.size = obj->size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->ring.ring_obj = obj;
	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));

		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	i915_kernel_lost_context(dev);

	return 0;
}
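
/**
 * Undo i915_gem_init_ringbuffer(): unmap, unpin and release the ring object,
 * then tear down the GEM-allocated hardware status page if one was set up.
 */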
static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	if (dev_priv->hws_obj != NULL) {
		struct drm_gem_object *obj = dev_priv->hws_obj;
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		kunmap(obj_priv->page_list[0]);
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		dev_priv->hws_obj = NULL;
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		dev_priv->hw_status_page = NULL;

		/* Write high address into HWS_PGA when disabling. */
		I915_WRITE(HWS_PGA, 0x1ffff000);
	}
}
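
/**
 * EnterVT ioctl: bring GEM back up after a VT switch.  Reinitializes the
 * ring, creates the write-combined GTT aperture mapping, clears the
 * suspended flag and installs the IRQ handler.
 */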
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0)
		return ret;

	dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
							dev->agp->agp_info.aper_size
							* 1024 * 1024);

	mutex_lock(&dev->struct_mutex);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	dev_priv->mm.suspended = 0;
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}
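
/**
 * LeaveVT ioctl: idle the GPU, then uninstall the IRQ handler and free the
 * GTT aperture mapping when the VT is switched away.
 */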
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	return ret;
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
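
/**
 * One-time GEM state setup at driver load: initialize the memory-manager
 * lists, the retire work handler and the seqno counter, and detect the
 * chipset's bit-6 swizzling mode.
 */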
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	i915_gem_detect_bit_6_swizzle(dev);
}