i915_gem.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable,
						    bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error))
	if (EXIT_COND)
		return 0;

	/* GPU is already declared terminally dead, give up. */
	if (i915_terminally_wedged(error))
		return -EIO;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}
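
/* An object is "inactive" when it is still bound into the GTT but the GPU
 * is no longer reading from or writing to it.
 */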
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}
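
/* Common object-creation helper shared by the create and dumb_create ioctls:
 * rounds the requested size up to a page multiple, allocates the backing GEM
 * object and publishes it to userspace as a handle.
 */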
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		i915_gem_object_free(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);

	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
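
/* CPU copy helpers for pages that need bit-17 swizzle compensation: the copy
 * is split at 64-byte cacheline boundaries and bit 6 of the object-side
 * offset is flipped (^ 64), since these helpers are only called for pages
 * whose physical address has bit 17 set.
 */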
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
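
/* Top-level shmem pread: walk the object's sg list page by page, try the
 * atomic kmap fast path first, and only drop struct_mutex to take the
 * sleeping slow path (prefaulting the user buffer once) when the fast
 * copy fails.
 */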
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct scatterlist *sg;
	int i;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
		if (obj->gtt_space) {
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
			if (ret)
				return ret;
		}
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
		struct page *page;

		if (i < offset >> PAGE_SHIFT)
			continue;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page = sg_page(sg);
		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (!prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_object_pin(obj, 0, true, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
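
/* Top-level shmem pwrite: mirrors the pread loop above. Cachelines are
 * flushed before the copy when the write only partially covers them
 * (needs_clflush_before) and flushed out again afterwards when the object
 * is uncached and the GPU may read the data directly (needs_clflush_after);
 * the chipset is flushed once the loop has finished.
 */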
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	int i;
	struct scatterlist *sg;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
		if (obj->gtt_space) {
			ret = i915_gem_object_set_to_gtt_domain(obj, true);
			if (ret)
				return ret;
		}
	}
	/* Same trick applies for invalidating partially written cachelines
	 * before writing. */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
	    && obj->cache_level == I915_CACHE_NONE)
		needs_clflush_before = 1;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
		struct page *page;
		int partial_cacheline_write;

		if (i < offset >> PAGE_SHIFT)
			continue;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire range being
		 * written. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page = sg_page(sg);
		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
					   args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->cache_level == I915_CACHE_NONE &&
	    obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
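
/* Translate the current GPU reset state into an errno for callers about to
 * wait on the hardware: -EIO for non-interruptible callers (and whenever the
 * GPU is terminally wedged), -EAGAIN while a reset is still in progress, and
 * 0 when it is safe to proceed.
 */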
int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_request)
		ret = i915_add_request(ring, NULL, NULL);

	return ret;
}
/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: the sequence number to wait for
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			bool interruptible, struct timespec *timeout)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now, wait_time={1,0};
	unsigned long timeout_jiffies;
	long end;
	bool wait_forever = true;
	int ret;

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	trace_i915_gem_request_wait_begin(ring, seqno);

	if (timeout != NULL) {
		wait_time = *timeout;
		wait_forever = false;
	}

	timeout_jiffies = timespec_to_jiffies(&wait_time);

	if (WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	getrawmonotonic(&before);

#define EXIT_COND \
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
	 i915_reset_in_progress(&dev_priv->gpu_error))
	do {
		if (interruptible)
			end = wait_event_interruptible_timeout(ring->irq_queue,
							       EXIT_COND,
							       timeout_jiffies);
		else
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);

		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
		if (ret)
			end = ret;
	} while (end == 0 && wait_forever);

	getrawmonotonic(&now);

	ring->irq_put(ring);
	trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
	}

	switch (end) {
	case -EIO:
	case -EAGAIN: /* Wedged */
	case -ERESTARTSYS: /* Signal */
		return (int)end;
	case 0: /* Timeout */
		if (timeout)
			set_normalized_timespec(timeout, 0, 0);
		return -ETIME;
	default: /* Completed */
		WARN_ON(end < 0); /* We're not aware of other errors */
		return 0;
	}
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno, interruptible, NULL);
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 */
	if (obj->last_write_seqno &&
	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
		obj->last_write_seqno = 0;
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

	return 0;
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, true, NULL);
	mutex_lock(&dev->struct_mutex);

	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 */
	if (obj->last_write_seqno &&
	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
		obj->last_write_seqno = 0;
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

	return ret;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
  1083. /**
  1084. * i915_gem_fault - fault a page into the GTT
  1085. * vma: VMA in question
  1086. * vmf: fault info
  1087. *
  1088. * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
  1089. * from userspace. The fault handler takes care of binding the object to
  1090. * the GTT (if needed), allocating and programming a fence register (again,
  1091. * only if needed based on whether the old reg is still valid or the object
  1092. * is tiled) and inserting a new PTE into the faulting process.
  1093. *
  1094. * Note that the faulting process may involve evicting existing objects
  1095. * from the GTT and/or fence registers to make room. So performance may
  1096. * suffer if the GTT working set is large or there are few fence registers
  1097. * left.
  1098. */
  1099. int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1100. {
  1101. struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
  1102. struct drm_device *dev = obj->base.dev;
  1103. drm_i915_private_t *dev_priv = dev->dev_private;
  1104. pgoff_t page_offset;
  1105. unsigned long pfn;
  1106. int ret = 0;
  1107. bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
  1108. /* We don't use vmf->pgoff since that has the fake offset */
  1109. page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
  1110. PAGE_SHIFT;
  1111. ret = i915_mutex_lock_interruptible(dev);
  1112. if (ret)
  1113. goto out;
  1114. trace_i915_gem_object_fault(obj, page_offset, true, write);
  1115. /* Access to snoopable pages through the GTT is incoherent. */
  1116. if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
  1117. ret = -EINVAL;
  1118. goto unlock;
  1119. }
  1120. /* Now bind it into the GTT if needed */
  1121. ret = i915_gem_object_pin(obj, 0, true, false);
  1122. if (ret)
  1123. goto unlock;
  1124. ret = i915_gem_object_set_to_gtt_domain(obj, write);
  1125. if (ret)
  1126. goto unpin;
  1127. ret = i915_gem_object_get_fence(obj);
  1128. if (ret)
  1129. goto unpin;
  1130. obj->fault_mappable = true;
  1131. pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
  1132. page_offset;
  1133. /* Finally, remap it using the new GTT offset */
  1134. ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
  1135. unpin:
  1136. i915_gem_object_unpin(obj);
  1137. unlock:
  1138. mutex_unlock(&dev->struct_mutex);
  1139. out:
  1140. switch (ret) {
  1141. case -EIO:
  1142. /* If this -EIO is due to a gpu hang, give the reset code a
  1143. * chance to clean up the mess. Otherwise return the proper
  1144. * SIGBUS. */
  1145. if (i915_terminally_wedged(&dev_priv->gpu_error))
  1146. return VM_FAULT_SIGBUS;
  1147. case -EAGAIN:
  1148. /* Give the error handler a chance to run and move the
  1149. * objects off the GPU active list. Next time we service the
  1150. * fault, we should be able to transition the page into the
  1151. * GTT without touching the GPU (and so avoid further
1152. * EIO/EAGAIN). If the GPU is wedged, then there is no issue
  1153. * with coherency, just lost writes.
  1154. */
  1155. set_need_resched();
  1156. case 0:
  1157. case -ERESTARTSYS:
  1158. case -EINTR:
  1159. case -EBUSY:
  1160. /*
  1161. * EBUSY is ok: this just means that another thread
  1162. * already did the job.
  1163. */
  1164. return VM_FAULT_NOPAGE;
  1165. case -ENOMEM:
  1166. return VM_FAULT_OOM;
  1167. case -ENOSPC:
  1168. return VM_FAULT_SIGBUS;
  1169. default:
  1170. WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
  1171. return VM_FAULT_SIGBUS;
  1172. }
  1173. }
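/*
 * Standalone sketch of the page_offset computation above: vmf->pgoff would
 * include the fake mmap offset handed out to userspace, so the fault
 * handler derives the object-relative page index from the faulting address
 * and the start of the VMA instead.  Purely illustrative; EXAMPLE_PAGE_SHIFT
 * stands in for the kernel's PAGE_SHIFT.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12

static unsigned long object_page_index(unsigned long fault_addr,
                                       unsigned long vma_start)
{
        return (fault_addr - vma_start) >> EXAMPLE_PAGE_SHIFT;
}

int main(void)
{
        /* A fault three pages into a mapping that starts at 0x7f0000000000. */
        printf("%lu\n", object_page_index(0x7f0000003000UL, 0x7f0000000000UL));
        return 0;
}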
  1174. /**
  1175. * i915_gem_release_mmap - remove physical page mappings
  1176. * @obj: obj in question
  1177. *
  1178. * Preserve the reservation of the mmapping with the DRM core code, but
  1179. * relinquish ownership of the pages back to the system.
  1180. *
  1181. * It is vital that we remove the page mapping if we have mapped a tiled
  1182. * object through the GTT and then lose the fence register due to
  1183. * resource pressure. Similarly if the object has been moved out of the
1184. * aperture, then pages mapped into userspace must be revoked. Removing the
  1185. * mapping will then trigger a page fault on the next user access, allowing
  1186. * fixup by i915_gem_fault().
  1187. */
  1188. void
  1189. i915_gem_release_mmap(struct drm_i915_gem_object *obj)
  1190. {
  1191. if (!obj->fault_mappable)
  1192. return;
  1193. if (obj->base.dev->dev_mapping)
  1194. unmap_mapping_range(obj->base.dev->dev_mapping,
  1195. (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
  1196. obj->base.size, 1);
  1197. obj->fault_mappable = false;
  1198. }
  1199. uint32_t
  1200. i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
  1201. {
  1202. uint32_t gtt_size;
  1203. if (INTEL_INFO(dev)->gen >= 4 ||
  1204. tiling_mode == I915_TILING_NONE)
  1205. return size;
  1206. /* Previous chips need a power-of-two fence region when tiling */
  1207. if (INTEL_INFO(dev)->gen == 3)
  1208. gtt_size = 1024*1024;
  1209. else
  1210. gtt_size = 512*1024;
  1211. while (gtt_size < size)
  1212. gtt_size <<= 1;
  1213. return gtt_size;
  1214. }
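/*
 * Standalone sketch of the rounding above: pre-gen4 fences cover a
 * power-of-two region with a chipset-dependent minimum (1MiB on gen3,
 * 512KiB on gen2), so a tiled object's fenced GTT footprint is its size
 * rounded up to the next such power of two.  Purely illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fence_region_size(uint32_t size, int gen)
{
        uint32_t gtt_size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

        while (gtt_size < size)
                gtt_size <<= 1;
        return gtt_size;
}

int main(void)
{
        /* A 1.5MiB tiled object needs a 2MiB fence region on gen3. */
        printf("%u\n", fence_region_size(1536 * 1024, 3));
        return 0;
}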
  1215. /**
  1216. * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  1217. * @obj: object to check
  1218. *
  1219. * Return the required GTT alignment for an object, taking into account
  1220. * potential fence register mapping.
  1221. */
  1222. uint32_t
  1223. i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
  1224. int tiling_mode, bool fenced)
  1225. {
  1226. /*
  1227. * Minimum alignment is 4k (GTT page size), but might be greater
  1228. * if a fence register is needed for the object.
  1229. */
  1230. if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
  1231. tiling_mode == I915_TILING_NONE)
  1232. return 4096;
  1233. /*
  1234. * Previous chips need to be aligned to the size of the smallest
  1235. * fence register that can contain the object.
  1236. */
  1237. return i915_gem_get_gtt_size(dev, size, tiling_mode);
  1238. }
  1239. static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
  1240. {
  1241. struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1242. int ret;
  1243. if (obj->base.map_list.map)
  1244. return 0;
  1245. dev_priv->mm.shrinker_no_lock_stealing = true;
  1246. ret = drm_gem_create_mmap_offset(&obj->base);
  1247. if (ret != -ENOSPC)
  1248. goto out;
  1249. /* Badly fragmented mmap space? The only way we can recover
  1250. * space is by destroying unwanted objects. We can't randomly release
  1251. * mmap_offsets as userspace expects them to be persistent for the
1252. * lifetime of the objects. The closest we can do is to release the
1253. * offsets on purgeable objects by truncating them and marking them purged,
1254. * which prevents userspace from ever using those objects again.
  1255. */
  1256. i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
  1257. ret = drm_gem_create_mmap_offset(&obj->base);
  1258. if (ret != -ENOSPC)
  1259. goto out;
  1260. i915_gem_shrink_all(dev_priv);
  1261. ret = drm_gem_create_mmap_offset(&obj->base);
  1262. out:
  1263. dev_priv->mm.shrinker_no_lock_stealing = false;
  1264. return ret;
  1265. }
  1266. static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
  1267. {
  1268. if (!obj->base.map_list.map)
  1269. return;
  1270. drm_gem_free_mmap_offset(&obj->base);
  1271. }
  1272. int
  1273. i915_gem_mmap_gtt(struct drm_file *file,
  1274. struct drm_device *dev,
  1275. uint32_t handle,
  1276. uint64_t *offset)
  1277. {
  1278. struct drm_i915_private *dev_priv = dev->dev_private;
  1279. struct drm_i915_gem_object *obj;
  1280. int ret;
  1281. ret = i915_mutex_lock_interruptible(dev);
  1282. if (ret)
  1283. return ret;
  1284. obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
  1285. if (&obj->base == NULL) {
  1286. ret = -ENOENT;
  1287. goto unlock;
  1288. }
  1289. if (obj->base.size > dev_priv->gtt.mappable_end) {
  1290. ret = -E2BIG;
  1291. goto out;
  1292. }
  1293. if (obj->madv != I915_MADV_WILLNEED) {
  1294. DRM_ERROR("Attempting to mmap a purgeable buffer\n");
  1295. ret = -EINVAL;
  1296. goto out;
  1297. }
  1298. ret = i915_gem_object_create_mmap_offset(obj);
  1299. if (ret)
  1300. goto out;
  1301. *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
  1302. out:
  1303. drm_gem_object_unreference(&obj->base);
  1304. unlock:
  1305. mutex_unlock(&dev->struct_mutex);
  1306. return ret;
  1307. }
  1308. /**
  1309. * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  1310. * @dev: DRM device
  1311. * @data: GTT mapping ioctl data
  1312. * @file: GEM object info
  1313. *
  1314. * Simply returns the fake offset to userspace so it can mmap it.
  1315. * The mmap call will end up in drm_gem_mmap(), which will set things
  1316. * up so we can get faults in the handler above.
  1317. *
  1318. * The fault handler will take care of binding the object into the GTT
  1319. * (since it may have been evicted to make room for something), allocating
  1320. * a fence register, and mapping the appropriate aperture address into
  1321. * userspace.
  1322. */
  1323. int
  1324. i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
  1325. struct drm_file *file)
  1326. {
  1327. struct drm_i915_gem_mmap_gtt *args = data;
  1328. return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
  1329. }
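/*
 * Illustrative userspace sketch (not part of this file): the two-step GTT
 * mapping flow described above.  The ioctl only hands back a fake offset;
 * the mapping itself is created by mmap()ing the DRM fd at that offset,
 * and the first access then faults into i915_gem_fault().  drm_fd, handle
 * and len are hypothetical names; the uapi comes from <drm/i915_drm.h>.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *gem_mmap_gtt(int drm_fd, uint32_t handle, uint64_t len)
{
        struct drm_i915_gem_mmap_gtt arg;
        void *ptr;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                return NULL;

        /* Map the fake offset; faults are serviced by i915_gem_fault(). */
        ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                   drm_fd, arg.offset);
        return ptr == MAP_FAILED ? NULL : ptr;
}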
  1330. /* Immediately discard the backing storage */
  1331. static void
  1332. i915_gem_object_truncate(struct drm_i915_gem_object *obj)
  1333. {
  1334. struct inode *inode;
  1335. i915_gem_object_free_mmap_offset(obj);
  1336. if (obj->base.filp == NULL)
  1337. return;
1338. /* Our goal here is to return as much of the memory as
1339. * possible back to the system, as we are called from OOM.
  1340. * To do this we must instruct the shmfs to drop all of its
  1341. * backing pages, *now*.
  1342. */
  1343. inode = obj->base.filp->f_path.dentry->d_inode;
  1344. shmem_truncate_range(inode, 0, (loff_t)-1);
  1345. obj->madv = __I915_MADV_PURGED;
  1346. }
  1347. static inline int
  1348. i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
  1349. {
  1350. return obj->madv == I915_MADV_DONTNEED;
  1351. }
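/*
 * Illustrative userspace sketch (not part of this file): how an object
 * becomes purgeable in the first place.  Userspace marks an idle buffer
 * DONTNEED via the madvise ioctl, making it eligible for the truncation
 * above; a later WILLNEED reports whether the pages were retained.
 * drm_fd and handle are hypothetical names; the uapi comes from
 * <drm/i915_drm.h>.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static bool gem_madvise(int drm_fd, uint32_t handle, uint32_t madv)
{
        struct drm_i915_gem_madvise arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        arg.madv = madv;        /* I915_MADV_DONTNEED or I915_MADV_WILLNEED */

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
                return false;

        /* retained == 0 means the backing store was purged in the meantime. */
        return arg.retained;
}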
  1352. static void
  1353. i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
  1354. {
  1355. int page_count = obj->base.size / PAGE_SIZE;
  1356. struct scatterlist *sg;
  1357. int ret, i;
  1358. BUG_ON(obj->madv == __I915_MADV_PURGED);
  1359. ret = i915_gem_object_set_to_cpu_domain(obj, true);
  1360. if (ret) {
  1361. /* In the event of a disaster, abandon all caches and
  1362. * hope for the best.
  1363. */
  1364. WARN_ON(ret != -EIO);
  1365. i915_gem_clflush_object(obj);
  1366. obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  1367. }
  1368. if (i915_gem_object_needs_bit17_swizzle(obj))
  1369. i915_gem_object_save_bit_17_swizzle(obj);
  1370. if (obj->madv == I915_MADV_DONTNEED)
  1371. obj->dirty = 0;
  1372. for_each_sg(obj->pages->sgl, sg, page_count, i) {
  1373. struct page *page = sg_page(sg);
  1374. if (obj->dirty)
  1375. set_page_dirty(page);
  1376. if (obj->madv == I915_MADV_WILLNEED)
  1377. mark_page_accessed(page);
  1378. page_cache_release(page);
  1379. }
  1380. obj->dirty = 0;
  1381. sg_free_table(obj->pages);
  1382. kfree(obj->pages);
  1383. }
  1384. int
  1385. i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
  1386. {
  1387. const struct drm_i915_gem_object_ops *ops = obj->ops;
  1388. if (obj->pages == NULL)
  1389. return 0;
  1390. BUG_ON(obj->gtt_space);
  1391. if (obj->pages_pin_count)
  1392. return -EBUSY;
  1393. /* ->put_pages might need to allocate memory for the bit17 swizzle
  1394. * array, hence protect them from being reaped by removing them from gtt
  1395. * lists early. */
  1396. list_del(&obj->gtt_list);
  1397. ops->put_pages(obj);
  1398. obj->pages = NULL;
  1399. if (i915_gem_object_is_purgeable(obj))
  1400. i915_gem_object_truncate(obj);
  1401. return 0;
  1402. }
  1403. static long
  1404. i915_gem_purge(struct drm_i915_private *dev_priv, long target)
  1405. {
  1406. struct drm_i915_gem_object *obj, *next;
  1407. long count = 0;
  1408. list_for_each_entry_safe(obj, next,
  1409. &dev_priv->mm.unbound_list,
  1410. gtt_list) {
  1411. if (i915_gem_object_is_purgeable(obj) &&
  1412. i915_gem_object_put_pages(obj) == 0) {
  1413. count += obj->base.size >> PAGE_SHIFT;
  1414. if (count >= target)
  1415. return count;
  1416. }
  1417. }
  1418. list_for_each_entry_safe(obj, next,
  1419. &dev_priv->mm.inactive_list,
  1420. mm_list) {
  1421. if (i915_gem_object_is_purgeable(obj) &&
  1422. i915_gem_object_unbind(obj) == 0 &&
  1423. i915_gem_object_put_pages(obj) == 0) {
  1424. count += obj->base.size >> PAGE_SHIFT;
  1425. if (count >= target)
  1426. return count;
  1427. }
  1428. }
  1429. return count;
  1430. }
  1431. static void
  1432. i915_gem_shrink_all(struct drm_i915_private *dev_priv)
  1433. {
  1434. struct drm_i915_gem_object *obj, *next;
  1435. i915_gem_evict_everything(dev_priv->dev);
  1436. list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
  1437. i915_gem_object_put_pages(obj);
  1438. }
  1439. static int
  1440. i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
  1441. {
  1442. struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1443. int page_count, i;
  1444. struct address_space *mapping;
  1445. struct sg_table *st;
  1446. struct scatterlist *sg;
  1447. struct page *page;
  1448. gfp_t gfp;
  1449. /* Assert that the object is not currently in any GPU domain. As it
  1450. * wasn't in the GTT, there shouldn't be any way it could have been in
1451. * a GPU cache.
  1452. */
  1453. BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
  1454. BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
  1455. st = kmalloc(sizeof(*st), GFP_KERNEL);
  1456. if (st == NULL)
  1457. return -ENOMEM;
  1458. page_count = obj->base.size / PAGE_SIZE;
  1459. if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
  1460. sg_free_table(st);
  1461. kfree(st);
  1462. return -ENOMEM;
  1463. }
  1464. /* Get the list of pages out of our struct file. They'll be pinned
  1465. * at this point until we release them.
  1466. *
  1467. * Fail silently without starting the shrinker
  1468. */
  1469. mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
  1470. gfp = mapping_gfp_mask(mapping);
  1471. gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
  1472. gfp &= ~(__GFP_IO | __GFP_WAIT);
  1473. for_each_sg(st->sgl, sg, page_count, i) {
  1474. page = shmem_read_mapping_page_gfp(mapping, i, gfp);
  1475. if (IS_ERR(page)) {
  1476. i915_gem_purge(dev_priv, page_count);
  1477. page = shmem_read_mapping_page_gfp(mapping, i, gfp);
  1478. }
  1479. if (IS_ERR(page)) {
  1480. /* We've tried hard to allocate the memory by reaping
  1481. * our own buffer, now let the real VM do its job and
  1482. * go down in flames if truly OOM.
  1483. */
  1484. gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
  1485. gfp |= __GFP_IO | __GFP_WAIT;
  1486. i915_gem_shrink_all(dev_priv);
  1487. page = shmem_read_mapping_page_gfp(mapping, i, gfp);
  1488. if (IS_ERR(page))
  1489. goto err_pages;
  1490. gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
  1491. gfp &= ~(__GFP_IO | __GFP_WAIT);
  1492. }
  1493. sg_set_page(sg, page, PAGE_SIZE, 0);
  1494. }
  1495. obj->pages = st;
  1496. if (i915_gem_object_needs_bit17_swizzle(obj))
  1497. i915_gem_object_do_bit_17_swizzle(obj);
  1498. return 0;
  1499. err_pages:
  1500. for_each_sg(st->sgl, sg, i, page_count)
  1501. page_cache_release(sg_page(sg));
  1502. sg_free_table(st);
  1503. kfree(st);
  1504. return PTR_ERR(page);
  1505. }
  1506. /* Ensure that the associated pages are gathered from the backing storage
  1507. * and pinned into our object. i915_gem_object_get_pages() may be called
  1508. * multiple times before they are released by a single call to
  1509. * i915_gem_object_put_pages() - once the pages are no longer referenced
  1510. * either as a result of memory pressure (reaping pages under the shrinker)
  1511. * or as the object is itself released.
  1512. */
  1513. int
  1514. i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
  1515. {
  1516. struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1517. const struct drm_i915_gem_object_ops *ops = obj->ops;
  1518. int ret;
  1519. if (obj->pages)
  1520. return 0;
  1521. if (obj->madv != I915_MADV_WILLNEED) {
  1522. DRM_ERROR("Attempting to obtain a purgeable object\n");
  1523. return -EINVAL;
  1524. }
  1525. BUG_ON(obj->pages_pin_count);
  1526. ret = ops->get_pages(obj);
  1527. if (ret)
  1528. return ret;
  1529. list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
  1530. return 0;
  1531. }
  1532. void
  1533. i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
  1534. struct intel_ring_buffer *ring)
  1535. {
  1536. struct drm_device *dev = obj->base.dev;
  1537. struct drm_i915_private *dev_priv = dev->dev_private;
  1538. u32 seqno = intel_ring_get_seqno(ring);
  1539. BUG_ON(ring == NULL);
  1540. obj->ring = ring;
  1541. /* Add a reference if we're newly entering the active list. */
  1542. if (!obj->active) {
  1543. drm_gem_object_reference(&obj->base);
  1544. obj->active = 1;
  1545. }
  1546. /* Move from whatever list we were on to the tail of execution. */
  1547. list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
  1548. list_move_tail(&obj->ring_list, &ring->active_list);
  1549. obj->last_read_seqno = seqno;
  1550. if (obj->fenced_gpu_access) {
  1551. obj->last_fenced_seqno = seqno;
  1552. /* Bump MRU to take account of the delayed flush */
  1553. if (obj->fence_reg != I915_FENCE_REG_NONE) {
  1554. struct drm_i915_fence_reg *reg;
  1555. reg = &dev_priv->fence_regs[obj->fence_reg];
  1556. list_move_tail(&reg->lru_list,
  1557. &dev_priv->mm.fence_list);
  1558. }
  1559. }
  1560. }
  1561. static void
  1562. i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
  1563. {
  1564. struct drm_device *dev = obj->base.dev;
  1565. struct drm_i915_private *dev_priv = dev->dev_private;
  1566. BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
  1567. BUG_ON(!obj->active);
  1568. if (obj->pin_count) /* are we a framebuffer? */
  1569. intel_mark_fb_idle(obj);
  1570. list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
  1571. list_del_init(&obj->ring_list);
  1572. obj->ring = NULL;
  1573. obj->last_read_seqno = 0;
  1574. obj->last_write_seqno = 0;
  1575. obj->base.write_domain = 0;
  1576. obj->last_fenced_seqno = 0;
  1577. obj->fenced_gpu_access = false;
  1578. obj->active = 0;
  1579. drm_gem_object_unreference(&obj->base);
  1580. WARN_ON(i915_verify_lists(dev));
  1581. }
  1582. static int
  1583. i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
  1584. {
  1585. struct drm_i915_private *dev_priv = dev->dev_private;
  1586. struct intel_ring_buffer *ring;
  1587. int ret, i, j;
  1588. /* Carefully retire all requests without writing to the rings */
  1589. for_each_ring(ring, dev_priv, i) {
  1590. ret = intel_ring_idle(ring);
  1591. if (ret)
  1592. return ret;
  1593. }
  1594. i915_gem_retire_requests(dev);
  1595. /* Finally reset hw state */
  1596. for_each_ring(ring, dev_priv, i) {
  1597. intel_ring_init_seqno(ring, seqno);
  1598. for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
  1599. ring->sync_seqno[j] = 0;
  1600. }
  1601. return 0;
  1602. }
  1603. int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
  1604. {
  1605. struct drm_i915_private *dev_priv = dev->dev_private;
  1606. int ret;
  1607. if (seqno == 0)
  1608. return -EINVAL;
1609. /* The seqno in the HWS page needs to be set to a value lower than what we
1610. * will inject into the ring
  1611. */
  1612. ret = i915_gem_init_seqno(dev, seqno - 1);
  1613. if (ret)
  1614. return ret;
  1615. /* Carefully set the last_seqno value so that wrap
  1616. * detection still works
  1617. */
  1618. dev_priv->next_seqno = seqno;
  1619. dev_priv->last_seqno = seqno - 1;
  1620. if (dev_priv->last_seqno == 0)
  1621. dev_priv->last_seqno--;
  1622. return 0;
  1623. }
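/*
 * Standalone sketch of the wrap-safe seqno ordering the code above is
 * careful to preserve: like the i915_seqno_passed() helper in i915_drv.h,
 * it compares two 32-bit seqnos by signed difference, so "later" stays
 * well-defined across the 0xffffffff -> 0 wrap as long as the values are
 * within 2^31 of each other.  Purely illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        /* true if seq1 is at or after seq2, modulo wrap */
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        /* Just after a wrap, seqno 5 has passed seqno 0xfffffff0. */
        printf("%d\n", seqno_passed(5, 0xfffffff0u));          /* prints 1 */
        printf("%d\n", seqno_passed(0xfffffff0u, 5));          /* prints 0 */
        return 0;
}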
  1624. int
  1625. i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  1626. {
  1627. struct drm_i915_private *dev_priv = dev->dev_private;
  1628. /* reserve 0 for non-seqno */
  1629. if (dev_priv->next_seqno == 0) {
  1630. int ret = i915_gem_init_seqno(dev, 0);
  1631. if (ret)
  1632. return ret;
  1633. dev_priv->next_seqno = 1;
  1634. }
  1635. *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
  1636. return 0;
  1637. }
  1638. int
  1639. i915_add_request(struct intel_ring_buffer *ring,
  1640. struct drm_file *file,
  1641. u32 *out_seqno)
  1642. {
  1643. drm_i915_private_t *dev_priv = ring->dev->dev_private;
  1644. struct drm_i915_gem_request *request;
  1645. u32 request_ring_position;
  1646. int was_empty;
  1647. int ret;
  1648. /*
  1649. * Emit any outstanding flushes - execbuf can fail to emit the flush
  1650. * after having emitted the batchbuffer command. Hence we need to fix
  1651. * things up similar to emitting the lazy request. The difference here
  1652. * is that the flush _must_ happen before the next request, no matter
  1653. * what.
  1654. */
  1655. ret = intel_ring_flush_all_caches(ring);
  1656. if (ret)
  1657. return ret;
  1658. request = kmalloc(sizeof(*request), GFP_KERNEL);
  1659. if (request == NULL)
  1660. return -ENOMEM;
  1661. /* Record the position of the start of the request so that
  1662. * should we detect the updated seqno part-way through the
  1663. * GPU processing the request, we never over-estimate the
  1664. * position of the head.
  1665. */
  1666. request_ring_position = intel_ring_get_tail(ring);
  1667. ret = ring->add_request(ring);
  1668. if (ret) {
  1669. kfree(request);
  1670. return ret;
  1671. }
  1672. request->seqno = intel_ring_get_seqno(ring);
  1673. request->ring = ring;
  1674. request->tail = request_ring_position;
  1675. request->emitted_jiffies = jiffies;
  1676. was_empty = list_empty(&ring->request_list);
  1677. list_add_tail(&request->list, &ring->request_list);
  1678. request->file_priv = NULL;
  1679. if (file) {
  1680. struct drm_i915_file_private *file_priv = file->driver_priv;
  1681. spin_lock(&file_priv->mm.lock);
  1682. request->file_priv = file_priv;
  1683. list_add_tail(&request->client_list,
  1684. &file_priv->mm.request_list);
  1685. spin_unlock(&file_priv->mm.lock);
  1686. }
  1687. trace_i915_gem_request_add(ring, request->seqno);
  1688. ring->outstanding_lazy_request = 0;
  1689. if (!dev_priv->mm.suspended) {
  1690. if (i915_enable_hangcheck) {
  1691. mod_timer(&dev_priv->gpu_error.hangcheck_timer,
  1692. round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
  1693. }
  1694. if (was_empty) {
  1695. queue_delayed_work(dev_priv->wq,
  1696. &dev_priv->mm.retire_work,
  1697. round_jiffies_up_relative(HZ));
  1698. intel_mark_busy(dev_priv->dev);
  1699. }
  1700. }
  1701. if (out_seqno)
  1702. *out_seqno = request->seqno;
  1703. return 0;
  1704. }
  1705. static inline void
  1706. i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
  1707. {
  1708. struct drm_i915_file_private *file_priv = request->file_priv;
  1709. if (!file_priv)
  1710. return;
  1711. spin_lock(&file_priv->mm.lock);
  1712. if (request->file_priv) {
  1713. list_del(&request->client_list);
  1714. request->file_priv = NULL;
  1715. }
  1716. spin_unlock(&file_priv->mm.lock);
  1717. }
  1718. static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
  1719. struct intel_ring_buffer *ring)
  1720. {
  1721. while (!list_empty(&ring->request_list)) {
  1722. struct drm_i915_gem_request *request;
  1723. request = list_first_entry(&ring->request_list,
  1724. struct drm_i915_gem_request,
  1725. list);
  1726. list_del(&request->list);
  1727. i915_gem_request_remove_from_client(request);
  1728. kfree(request);
  1729. }
  1730. while (!list_empty(&ring->active_list)) {
  1731. struct drm_i915_gem_object *obj;
  1732. obj = list_first_entry(&ring->active_list,
  1733. struct drm_i915_gem_object,
  1734. ring_list);
  1735. i915_gem_object_move_to_inactive(obj);
  1736. }
  1737. }
  1738. static void i915_gem_reset_fences(struct drm_device *dev)
  1739. {
  1740. struct drm_i915_private *dev_priv = dev->dev_private;
  1741. int i;
  1742. for (i = 0; i < dev_priv->num_fence_regs; i++) {
  1743. struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
  1744. i915_gem_write_fence(dev, i, NULL);
  1745. if (reg->obj)
  1746. i915_gem_object_fence_lost(reg->obj);
  1747. reg->pin_count = 0;
  1748. reg->obj = NULL;
  1749. INIT_LIST_HEAD(&reg->lru_list);
  1750. }
  1751. INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  1752. }
  1753. void i915_gem_reset(struct drm_device *dev)
  1754. {
  1755. struct drm_i915_private *dev_priv = dev->dev_private;
  1756. struct drm_i915_gem_object *obj;
  1757. struct intel_ring_buffer *ring;
  1758. int i;
  1759. for_each_ring(ring, dev_priv, i)
  1760. i915_gem_reset_ring_lists(dev_priv, ring);
  1761. /* Move everything out of the GPU domains to ensure we do any
  1762. * necessary invalidation upon reuse.
  1763. */
  1764. list_for_each_entry(obj,
  1765. &dev_priv->mm.inactive_list,
  1766. mm_list)
  1767. {
  1768. obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
  1769. }
  1770. /* The fence registers are invalidated so clear them out */
  1771. i915_gem_reset_fences(dev);
  1772. }
  1773. /**
  1774. * This function clears the request list as sequence numbers are passed.
  1775. */
  1776. void
  1777. i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
  1778. {
  1779. uint32_t seqno;
  1780. if (list_empty(&ring->request_list))
  1781. return;
  1782. WARN_ON(i915_verify_lists(ring->dev));
  1783. seqno = ring->get_seqno(ring, true);
  1784. while (!list_empty(&ring->request_list)) {
  1785. struct drm_i915_gem_request *request;
  1786. request = list_first_entry(&ring->request_list,
  1787. struct drm_i915_gem_request,
  1788. list);
  1789. if (!i915_seqno_passed(seqno, request->seqno))
  1790. break;
  1791. trace_i915_gem_request_retire(ring, request->seqno);
  1792. /* We know the GPU must have read the request to have
  1793. * sent us the seqno + interrupt, so use the position
1794. * of the tail of the request to update the last known position
  1795. * of the GPU head.
  1796. */
  1797. ring->last_retired_head = request->tail;
  1798. list_del(&request->list);
  1799. i915_gem_request_remove_from_client(request);
  1800. kfree(request);
  1801. }
  1802. /* Move any buffers on the active list that are no longer referenced
  1803. * by the ringbuffer to the flushing/inactive lists as appropriate.
  1804. */
  1805. while (!list_empty(&ring->active_list)) {
  1806. struct drm_i915_gem_object *obj;
  1807. obj = list_first_entry(&ring->active_list,
  1808. struct drm_i915_gem_object,
  1809. ring_list);
  1810. if (!i915_seqno_passed(seqno, obj->last_read_seqno))
  1811. break;
  1812. i915_gem_object_move_to_inactive(obj);
  1813. }
  1814. if (unlikely(ring->trace_irq_seqno &&
  1815. i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
  1816. ring->irq_put(ring);
  1817. ring->trace_irq_seqno = 0;
  1818. }
  1819. WARN_ON(i915_verify_lists(ring->dev));
  1820. }
  1821. void
  1822. i915_gem_retire_requests(struct drm_device *dev)
  1823. {
  1824. drm_i915_private_t *dev_priv = dev->dev_private;
  1825. struct intel_ring_buffer *ring;
  1826. int i;
  1827. for_each_ring(ring, dev_priv, i)
  1828. i915_gem_retire_requests_ring(ring);
  1829. }
  1830. static void
  1831. i915_gem_retire_work_handler(struct work_struct *work)
  1832. {
  1833. drm_i915_private_t *dev_priv;
  1834. struct drm_device *dev;
  1835. struct intel_ring_buffer *ring;
  1836. bool idle;
  1837. int i;
  1838. dev_priv = container_of(work, drm_i915_private_t,
  1839. mm.retire_work.work);
  1840. dev = dev_priv->dev;
  1841. /* Come back later if the device is busy... */
  1842. if (!mutex_trylock(&dev->struct_mutex)) {
  1843. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
  1844. round_jiffies_up_relative(HZ));
  1845. return;
  1846. }
  1847. i915_gem_retire_requests(dev);
  1848. /* Send a periodic flush down the ring so we don't hold onto GEM
  1849. * objects indefinitely.
  1850. */
  1851. idle = true;
  1852. for_each_ring(ring, dev_priv, i) {
  1853. if (ring->gpu_caches_dirty)
  1854. i915_add_request(ring, NULL, NULL);
  1855. idle &= list_empty(&ring->request_list);
  1856. }
  1857. if (!dev_priv->mm.suspended && !idle)
  1858. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
  1859. round_jiffies_up_relative(HZ));
  1860. if (idle)
  1861. intel_mark_idle(dev);
  1862. mutex_unlock(&dev->struct_mutex);
  1863. }
  1864. /**
  1865. * Ensures that an object will eventually get non-busy by flushing any required
1866. * write domains, emitting any outstanding lazy request and retiring any
1867. * completed requests.
  1868. */
  1869. static int
  1870. i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
  1871. {
  1872. int ret;
  1873. if (obj->active) {
  1874. ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
  1875. if (ret)
  1876. return ret;
  1877. i915_gem_retire_requests_ring(obj->ring);
  1878. }
  1879. return 0;
  1880. }
  1881. /**
  1882. * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
  1883. * @DRM_IOCTL_ARGS: standard ioctl arguments
  1884. *
  1885. * Returns 0 if successful, else an error is returned with the remaining time in
  1886. * the timeout parameter.
  1887. * -ETIME: object is still busy after timeout
  1888. * -ERESTARTSYS: signal interrupted the wait
1889. * -ENOENT: object doesn't exist
  1890. * Also possible, but rare:
  1891. * -EAGAIN: GPU wedged
  1892. * -ENOMEM: damn
  1893. * -ENODEV: Internal IRQ fail
  1894. * -E?: The add request failed
  1895. *
  1896. * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
  1897. * non-zero timeout parameter the wait ioctl will wait for the given number of
  1898. * nanoseconds on an object becoming unbusy. Since the wait itself does so
  1899. * without holding struct_mutex the object may become re-busied before this
1900. * function completes. A similar but shorter race condition exists in the busy
1901. * ioctl.
  1902. */
  1903. int
  1904. i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
  1905. {
  1906. struct drm_i915_gem_wait *args = data;
  1907. struct drm_i915_gem_object *obj;
  1908. struct intel_ring_buffer *ring = NULL;
  1909. struct timespec timeout_stack, *timeout = NULL;
  1910. u32 seqno = 0;
  1911. int ret = 0;
  1912. if (args->timeout_ns >= 0) {
  1913. timeout_stack = ns_to_timespec(args->timeout_ns);
  1914. timeout = &timeout_stack;
  1915. }
  1916. ret = i915_mutex_lock_interruptible(dev);
  1917. if (ret)
  1918. return ret;
  1919. obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
  1920. if (&obj->base == NULL) {
  1921. mutex_unlock(&dev->struct_mutex);
  1922. return -ENOENT;
  1923. }
  1924. /* Need to make sure the object gets inactive eventually. */
  1925. ret = i915_gem_object_flush_active(obj);
  1926. if (ret)
  1927. goto out;
  1928. if (obj->active) {
  1929. seqno = obj->last_read_seqno;
  1930. ring = obj->ring;
  1931. }
  1932. if (seqno == 0)
  1933. goto out;
  1934. /* Do this after OLR check to make sure we make forward progress polling
  1935. * on this IOCTL with a 0 timeout (like busy ioctl)
  1936. */
  1937. if (!args->timeout_ns) {
  1938. ret = -ETIME;
  1939. goto out;
  1940. }
  1941. drm_gem_object_unreference(&obj->base);
  1942. mutex_unlock(&dev->struct_mutex);
  1943. ret = __wait_seqno(ring, seqno, true, timeout);
  1944. if (timeout) {
  1945. WARN_ON(!timespec_valid(timeout));
  1946. args->timeout_ns = timespec_to_ns(timeout);
  1947. }
  1948. return ret;
  1949. out:
  1950. drm_gem_object_unreference(&obj->base);
  1951. mutex_unlock(&dev->struct_mutex);
  1952. return ret;
  1953. }
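/*
 * Illustrative userspace sketch (not part of this file): using the wait
 * ioctl documented above.  A timeout of 0 merely polls for busyness; a
 * positive timeout_ns blocks until the object is idle or the timeout
 * expires, with the remaining time written back into the argument.
 * drm_fd and handle are hypothetical names; the uapi comes from
 * <drm/i915_drm.h>.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int gem_wait(int drm_fd, uint32_t handle, int64_t timeout_ns)
{
        struct drm_i915_gem_wait arg;

        memset(&arg, 0, sizeof(arg));
        arg.bo_handle = handle;
        arg.timeout_ns = timeout_ns;

        /* 0 on success; -1 with errno set (e.g. ETIME if still busy). */
        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &arg);
}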
  1954. /**
  1955. * i915_gem_object_sync - sync an object to a ring.
  1956. *
  1957. * @obj: object which may be in use on another ring.
  1958. * @to: ring we wish to use the object on. May be NULL.
  1959. *
  1960. * This code is meant to abstract object synchronization with the GPU.
  1961. * Calling with NULL implies synchronizing the object with the CPU
  1962. * rather than a particular GPU ring.
  1963. *
  1964. * Returns 0 if successful, else propagates up the lower layer error.
  1965. */
  1966. int
  1967. i915_gem_object_sync(struct drm_i915_gem_object *obj,
  1968. struct intel_ring_buffer *to)
  1969. {
  1970. struct intel_ring_buffer *from = obj->ring;
  1971. u32 seqno;
  1972. int ret, idx;
  1973. if (from == NULL || to == from)
  1974. return 0;
  1975. if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
  1976. return i915_gem_object_wait_rendering(obj, false);
  1977. idx = intel_ring_sync_index(from, to);
  1978. seqno = obj->last_read_seqno;
  1979. if (seqno <= from->sync_seqno[idx])
  1980. return 0;
  1981. ret = i915_gem_check_olr(obj->ring, seqno);
  1982. if (ret)
  1983. return ret;
  1984. ret = to->sync_to(to, from, seqno);
  1985. if (!ret)
  1986. /* We use last_read_seqno because sync_to()
  1987. * might have just caused seqno wrap under
  1988. * the radar.
  1989. */
  1990. from->sync_seqno[idx] = obj->last_read_seqno;
  1991. return ret;
  1992. }
  1993. static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
  1994. {
  1995. u32 old_write_domain, old_read_domains;
1996. /* Act as a barrier for all accesses through the GTT */
  1997. mb();
  1998. /* Force a pagefault for domain tracking on next user access */
  1999. i915_gem_release_mmap(obj);
  2000. if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
  2001. return;
  2002. old_read_domains = obj->base.read_domains;
  2003. old_write_domain = obj->base.write_domain;
  2004. obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
  2005. obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
  2006. trace_i915_gem_object_change_domain(obj,
  2007. old_read_domains,
  2008. old_write_domain);
  2009. }
  2010. /**
  2011. * Unbinds an object from the GTT aperture.
  2012. */
  2013. int
  2014. i915_gem_object_unbind(struct drm_i915_gem_object *obj)
  2015. {
  2016. drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
  2017. int ret;
  2018. if (obj->gtt_space == NULL)
  2019. return 0;
  2020. if (obj->pin_count)
  2021. return -EBUSY;
  2022. BUG_ON(obj->pages == NULL);
  2023. ret = i915_gem_object_finish_gpu(obj);
  2024. if (ret)
  2025. return ret;
  2026. /* Continue on if we fail due to EIO, the GPU is hung so we
2027. * should be safe and we need to clean up or else we might
  2028. * cause memory corruption through use-after-free.
  2029. */
  2030. i915_gem_object_finish_gtt(obj);
  2031. /* release the fence reg _after_ flushing */
  2032. ret = i915_gem_object_put_fence(obj);
  2033. if (ret)
  2034. return ret;
  2035. trace_i915_gem_object_unbind(obj);
  2036. if (obj->has_global_gtt_mapping)
  2037. i915_gem_gtt_unbind_object(obj);
  2038. if (obj->has_aliasing_ppgtt_mapping) {
  2039. i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
  2040. obj->has_aliasing_ppgtt_mapping = 0;
  2041. }
  2042. i915_gem_gtt_finish_object(obj);
  2043. list_del(&obj->mm_list);
  2044. list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
  2045. /* Avoid an unnecessary call to unbind on rebind. */
  2046. obj->map_and_fenceable = true;
  2047. drm_mm_put_block(obj->gtt_space);
  2048. obj->gtt_space = NULL;
  2049. obj->gtt_offset = 0;
  2050. return 0;
  2051. }
  2052. int i915_gpu_idle(struct drm_device *dev)
  2053. {
  2054. drm_i915_private_t *dev_priv = dev->dev_private;
  2055. struct intel_ring_buffer *ring;
  2056. int ret, i;
  2057. /* Flush everything onto the inactive list. */
  2058. for_each_ring(ring, dev_priv, i) {
  2059. ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
  2060. if (ret)
  2061. return ret;
  2062. ret = intel_ring_idle(ring);
  2063. if (ret)
  2064. return ret;
  2065. }
  2066. return 0;
  2067. }
  2068. static void i965_write_fence_reg(struct drm_device *dev, int reg,
  2069. struct drm_i915_gem_object *obj)
  2070. {
  2071. drm_i915_private_t *dev_priv = dev->dev_private;
  2072. int fence_reg;
  2073. int fence_pitch_shift;
  2074. uint64_t val;
  2075. if (INTEL_INFO(dev)->gen >= 6) {
  2076. fence_reg = FENCE_REG_SANDYBRIDGE_0;
  2077. fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
  2078. } else {
  2079. fence_reg = FENCE_REG_965_0;
  2080. fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
  2081. }
  2082. if (obj) {
  2083. u32 size = obj->gtt_space->size;
  2084. val = (uint64_t)((obj->gtt_offset + size - 4096) &
  2085. 0xfffff000) << 32;
  2086. val |= obj->gtt_offset & 0xfffff000;
  2087. val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
  2088. if (obj->tiling_mode == I915_TILING_Y)
  2089. val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  2090. val |= I965_FENCE_REG_VALID;
  2091. } else
  2092. val = 0;
  2093. fence_reg += reg * 8;
  2094. I915_WRITE64(fence_reg, val);
  2095. POSTING_READ(fence_reg);
  2096. }
  2097. static void i915_write_fence_reg(struct drm_device *dev, int reg,
  2098. struct drm_i915_gem_object *obj)
  2099. {
  2100. drm_i915_private_t *dev_priv = dev->dev_private;
  2101. u32 val;
  2102. if (obj) {
  2103. u32 size = obj->gtt_space->size;
  2104. int pitch_val;
  2105. int tile_width;
  2106. WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
  2107. (size & -size) != size ||
  2108. (obj->gtt_offset & (size - 1)),
  2109. "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
  2110. obj->gtt_offset, obj->map_and_fenceable, size);
  2111. if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
  2112. tile_width = 128;
  2113. else
  2114. tile_width = 512;
  2115. /* Note: pitch better be a power of two tile widths */
  2116. pitch_val = obj->stride / tile_width;
  2117. pitch_val = ffs(pitch_val) - 1;
  2118. val = obj->gtt_offset;
  2119. if (obj->tiling_mode == I915_TILING_Y)
  2120. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2121. val |= I915_FENCE_SIZE_BITS(size);
  2122. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2123. val |= I830_FENCE_REG_VALID;
  2124. } else
  2125. val = 0;
  2126. if (reg < 8)
  2127. reg = FENCE_REG_830_0 + reg * 4;
  2128. else
  2129. reg = FENCE_REG_945_8 + (reg - 8) * 4;
  2130. I915_WRITE(reg, val);
  2131. POSTING_READ(reg);
  2132. }
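/*
 * Standalone sketch of the pitch encoding above: pre-965 fence registers
 * store the pitch as log2 of the stride expressed in tile widths (128 or
 * 512 bytes, as selected above), which is why the stride must be a
 * power-of-two multiple of the tile width.  Purely illustrative; uses
 * ffs() from <strings.h>.
 */
#include <stdio.h>
#include <strings.h>

static int fence_pitch_bits(unsigned int stride, unsigned int tile_width)
{
        /* e.g. stride 2048 with 512-byte tiles -> 4 tile widths -> 2 */
        return ffs(stride / tile_width) - 1;
}

int main(void)
{
        printf("%d\n", fence_pitch_bits(2048, 512));    /* prints 2 */
        return 0;
}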
  2133. static void i830_write_fence_reg(struct drm_device *dev, int reg,
  2134. struct drm_i915_gem_object *obj)
  2135. {
  2136. drm_i915_private_t *dev_priv = dev->dev_private;
  2137. uint32_t val;
  2138. if (obj) {
  2139. u32 size = obj->gtt_space->size;
  2140. uint32_t pitch_val;
  2141. WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
  2142. (size & -size) != size ||
  2143. (obj->gtt_offset & (size - 1)),
  2144. "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
  2145. obj->gtt_offset, size);
  2146. pitch_val = obj->stride / 128;
  2147. pitch_val = ffs(pitch_val) - 1;
  2148. val = obj->gtt_offset;
  2149. if (obj->tiling_mode == I915_TILING_Y)
  2150. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2151. val |= I830_FENCE_SIZE_BITS(size);
  2152. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2153. val |= I830_FENCE_REG_VALID;
  2154. } else
  2155. val = 0;
  2156. I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
  2157. POSTING_READ(FENCE_REG_830_0 + reg * 4);
  2158. }
  2159. inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
  2160. {
  2161. return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
  2162. }
  2163. static void i915_gem_write_fence(struct drm_device *dev, int reg,
  2164. struct drm_i915_gem_object *obj)
  2165. {
  2166. struct drm_i915_private *dev_priv = dev->dev_private;
  2167. /* Ensure that all CPU reads are completed before installing a fence
  2168. * and all writes before removing the fence.
  2169. */
  2170. if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
  2171. mb();
  2172. switch (INTEL_INFO(dev)->gen) {
  2173. case 7:
  2174. case 6:
  2175. case 5:
  2176. case 4: i965_write_fence_reg(dev, reg, obj); break;
  2177. case 3: i915_write_fence_reg(dev, reg, obj); break;
  2178. case 2: i830_write_fence_reg(dev, reg, obj); break;
  2179. default: BUG();
  2180. }
  2181. /* And similarly be paranoid that no direct access to this region
  2182. * is reordered to before the fence is installed.
  2183. */
  2184. if (i915_gem_object_needs_mb(obj))
  2185. mb();
  2186. }
  2187. static inline int fence_number(struct drm_i915_private *dev_priv,
  2188. struct drm_i915_fence_reg *fence)
  2189. {
  2190. return fence - dev_priv->fence_regs;
  2191. }
  2192. static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
  2193. struct drm_i915_fence_reg *fence,
  2194. bool enable)
  2195. {
  2196. struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  2197. int reg = fence_number(dev_priv, fence);
  2198. i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
  2199. if (enable) {
  2200. obj->fence_reg = reg;
  2201. fence->obj = obj;
  2202. list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
  2203. } else {
  2204. obj->fence_reg = I915_FENCE_REG_NONE;
  2205. fence->obj = NULL;
  2206. list_del_init(&fence->lru_list);
  2207. }
  2208. }
  2209. static int
  2210. i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
  2211. {
  2212. if (obj->last_fenced_seqno) {
  2213. int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
  2214. if (ret)
  2215. return ret;
  2216. obj->last_fenced_seqno = 0;
  2217. }
  2218. obj->fenced_gpu_access = false;
  2219. return 0;
  2220. }
  2221. int
  2222. i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
  2223. {
  2224. struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  2225. int ret;
  2226. ret = i915_gem_object_wait_fence(obj);
  2227. if (ret)
  2228. return ret;
  2229. if (obj->fence_reg == I915_FENCE_REG_NONE)
  2230. return 0;
  2231. i915_gem_object_update_fence(obj,
  2232. &dev_priv->fence_regs[obj->fence_reg],
  2233. false);
  2234. i915_gem_object_fence_lost(obj);
  2235. return 0;
  2236. }
  2237. static struct drm_i915_fence_reg *
  2238. i915_find_fence_reg(struct drm_device *dev)
  2239. {
  2240. struct drm_i915_private *dev_priv = dev->dev_private;
  2241. struct drm_i915_fence_reg *reg, *avail;
  2242. int i;
  2243. /* First try to find a free reg */
  2244. avail = NULL;
  2245. for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
  2246. reg = &dev_priv->fence_regs[i];
  2247. if (!reg->obj)
  2248. return reg;
  2249. if (!reg->pin_count)
  2250. avail = reg;
  2251. }
  2252. if (avail == NULL)
  2253. return NULL;
  2254. /* None available, try to steal one or wait for a user to finish */
  2255. list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
  2256. if (reg->pin_count)
  2257. continue;
  2258. return reg;
  2259. }
  2260. return NULL;
  2261. }
  2262. /**
  2263. * i915_gem_object_get_fence - set up fencing for an object
  2264. * @obj: object to map through a fence reg
  2265. *
  2266. * When mapping objects through the GTT, userspace wants to be able to write
  2267. * to them without having to worry about swizzling if the object is tiled.
  2268. * This function walks the fence regs looking for a free one for @obj,
  2269. * stealing one if it can't find any.
  2270. *
  2271. * It then sets up the reg based on the object's properties: address, pitch
  2272. * and tiling format.
  2273. *
  2274. * For an untiled surface, this removes any existing fence.
  2275. */
  2276. int
  2277. i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
  2278. {
  2279. struct drm_device *dev = obj->base.dev;
  2280. struct drm_i915_private *dev_priv = dev->dev_private;
  2281. bool enable = obj->tiling_mode != I915_TILING_NONE;
  2282. struct drm_i915_fence_reg *reg;
  2283. int ret;
  2284. /* Have we updated the tiling parameters upon the object and so
  2285. * will need to serialise the write to the associated fence register?
  2286. */
  2287. if (obj->fence_dirty) {
  2288. ret = i915_gem_object_wait_fence(obj);
  2289. if (ret)
  2290. return ret;
  2291. }
  2292. /* Just update our place in the LRU if our fence is getting reused. */
  2293. if (obj->fence_reg != I915_FENCE_REG_NONE) {
  2294. reg = &dev_priv->fence_regs[obj->fence_reg];
  2295. if (!obj->fence_dirty) {
  2296. list_move_tail(&reg->lru_list,
  2297. &dev_priv->mm.fence_list);
  2298. return 0;
  2299. }
  2300. } else if (enable) {
  2301. reg = i915_find_fence_reg(dev);
  2302. if (reg == NULL)
  2303. return -EDEADLK;
  2304. if (reg->obj) {
  2305. struct drm_i915_gem_object *old = reg->obj;
  2306. ret = i915_gem_object_wait_fence(old);
  2307. if (ret)
  2308. return ret;
  2309. i915_gem_object_fence_lost(old);
  2310. }
  2311. } else
  2312. return 0;
  2313. i915_gem_object_update_fence(obj, reg, enable);
  2314. obj->fence_dirty = false;
  2315. return 0;
  2316. }
  2317. static bool i915_gem_valid_gtt_space(struct drm_device *dev,
  2318. struct drm_mm_node *gtt_space,
  2319. unsigned long cache_level)
  2320. {
  2321. struct drm_mm_node *other;
  2322. /* On non-LLC machines we have to be careful when putting differing
  2323. * types of snoopable memory together to avoid the prefetcher
  2324. * crossing memory domains and dying.
  2325. */
  2326. if (HAS_LLC(dev))
  2327. return true;
  2328. if (gtt_space == NULL)
  2329. return true;
  2330. if (list_empty(&gtt_space->node_list))
  2331. return true;
  2332. other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
  2333. if (other->allocated && !other->hole_follows && other->color != cache_level)
  2334. return false;
  2335. other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
  2336. if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
  2337. return false;
  2338. return true;
  2339. }
  2340. static void i915_gem_verify_gtt(struct drm_device *dev)
  2341. {
  2342. #if WATCH_GTT
  2343. struct drm_i915_private *dev_priv = dev->dev_private;
  2344. struct drm_i915_gem_object *obj;
  2345. int err = 0;
  2346. list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
  2347. if (obj->gtt_space == NULL) {
  2348. printk(KERN_ERR "object found on GTT list with no space reserved\n");
  2349. err++;
  2350. continue;
  2351. }
  2352. if (obj->cache_level != obj->gtt_space->color) {
  2353. printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
  2354. obj->gtt_space->start,
  2355. obj->gtt_space->start + obj->gtt_space->size,
  2356. obj->cache_level,
  2357. obj->gtt_space->color);
  2358. err++;
  2359. continue;
  2360. }
  2361. if (!i915_gem_valid_gtt_space(dev,
  2362. obj->gtt_space,
  2363. obj->cache_level)) {
  2364. printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
  2365. obj->gtt_space->start,
  2366. obj->gtt_space->start + obj->gtt_space->size,
  2367. obj->cache_level);
  2368. err++;
  2369. continue;
  2370. }
  2371. }
  2372. WARN_ON(err);
  2373. #endif
  2374. }
  2375. /**
  2376. * Finds free space in the GTT aperture and binds the object there.
  2377. */
  2378. static int
  2379. i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
  2380. unsigned alignment,
  2381. bool map_and_fenceable,
  2382. bool nonblocking)
  2383. {
  2384. struct drm_device *dev = obj->base.dev;
  2385. drm_i915_private_t *dev_priv = dev->dev_private;
  2386. struct drm_mm_node *node;
  2387. u32 size, fence_size, fence_alignment, unfenced_alignment;
  2388. bool mappable, fenceable;
  2389. int ret;
  2390. fence_size = i915_gem_get_gtt_size(dev,
  2391. obj->base.size,
  2392. obj->tiling_mode);
  2393. fence_alignment = i915_gem_get_gtt_alignment(dev,
  2394. obj->base.size,
  2395. obj->tiling_mode, true);
  2396. unfenced_alignment =
  2397. i915_gem_get_gtt_alignment(dev,
  2398. obj->base.size,
  2399. obj->tiling_mode, false);
  2400. if (alignment == 0)
  2401. alignment = map_and_fenceable ? fence_alignment :
  2402. unfenced_alignment;
  2403. if (map_and_fenceable && alignment & (fence_alignment - 1)) {
  2404. DRM_ERROR("Invalid object alignment requested %u\n", alignment);
  2405. return -EINVAL;
  2406. }
  2407. size = map_and_fenceable ? fence_size : obj->base.size;
  2408. /* If the object is bigger than the entire aperture, reject it early
  2409. * before evicting everything in a vain attempt to find space.
  2410. */
  2411. if (obj->base.size >
  2412. (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
  2413. DRM_ERROR("Attempting to bind an object larger than the aperture\n");
  2414. return -E2BIG;
  2415. }
  2416. ret = i915_gem_object_get_pages(obj);
  2417. if (ret)
  2418. return ret;
  2419. i915_gem_object_pin_pages(obj);
  2420. node = kzalloc(sizeof(*node), GFP_KERNEL);
  2421. if (node == NULL) {
  2422. i915_gem_object_unpin_pages(obj);
  2423. return -ENOMEM;
  2424. }
  2425. search_free:
  2426. if (map_and_fenceable)
  2427. ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
  2428. size, alignment, obj->cache_level,
  2429. 0, dev_priv->gtt.mappable_end);
  2430. else
  2431. ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
  2432. size, alignment, obj->cache_level);
  2433. if (ret) {
  2434. ret = i915_gem_evict_something(dev, size, alignment,
  2435. obj->cache_level,
  2436. map_and_fenceable,
  2437. nonblocking);
  2438. if (ret == 0)
  2439. goto search_free;
  2440. i915_gem_object_unpin_pages(obj);
  2441. kfree(node);
  2442. return ret;
  2443. }
  2444. if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
  2445. i915_gem_object_unpin_pages(obj);
  2446. drm_mm_put_block(node);
  2447. return -EINVAL;
  2448. }
  2449. ret = i915_gem_gtt_prepare_object(obj);
  2450. if (ret) {
  2451. i915_gem_object_unpin_pages(obj);
  2452. drm_mm_put_block(node);
  2453. return ret;
  2454. }
  2455. list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
  2456. list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
  2457. obj->gtt_space = node;
  2458. obj->gtt_offset = node->start;
  2459. fenceable =
  2460. node->size == fence_size &&
  2461. (node->start & (fence_alignment - 1)) == 0;
  2462. mappable =
  2463. obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
  2464. obj->map_and_fenceable = mappable && fenceable;
  2465. i915_gem_object_unpin_pages(obj);
  2466. trace_i915_gem_object_bind(obj, map_and_fenceable);
  2467. i915_gem_verify_gtt(dev);
  2468. return 0;
  2469. }
  2470. void
  2471. i915_gem_clflush_object(struct drm_i915_gem_object *obj)
  2472. {
  2473. /* If we don't have a page list set up, then we're not pinned
  2474. * to GPU, and we can ignore the cache flush because it'll happen
  2475. * again at bind time.
  2476. */
  2477. if (obj->pages == NULL)
  2478. return;
  2479. /* If the GPU is snooping the contents of the CPU cache,
  2480. * we do not need to manually clear the CPU cache lines. However,
  2481. * the caches are only snooped when the render cache is
  2482. * flushed/invalidated. As we always have to emit invalidations
  2483. * and flushes when moving into and out of the RENDER domain, correct
  2484. * snooping behaviour occurs naturally as the result of our domain
  2485. * tracking.
  2486. */
  2487. if (obj->cache_level != I915_CACHE_NONE)
  2488. return;
  2489. trace_i915_gem_object_clflush(obj);
  2490. drm_clflush_sg(obj->pages);
  2491. }
  2492. /** Flushes the GTT write domain for the object if it's dirty. */
  2493. static void
  2494. i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
  2495. {
  2496. uint32_t old_write_domain;
  2497. if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
  2498. return;
  2499. /* No actual flushing is required for the GTT write domain. Writes
  2500. * to it immediately go to main memory as far as we know, so there's
  2501. * no chipset flush. It also doesn't land in render cache.
  2502. *
  2503. * However, we do have to enforce the order so that all writes through
  2504. * the GTT land before any writes to the device, such as updates to
  2505. * the GATT itself.
  2506. */
  2507. wmb();
  2508. old_write_domain = obj->base.write_domain;
  2509. obj->base.write_domain = 0;
  2510. trace_i915_gem_object_change_domain(obj,
  2511. obj->base.read_domains,
  2512. old_write_domain);
  2513. }
  2514. /** Flushes the CPU write domain for the object if it's dirty. */
  2515. static void
  2516. i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
  2517. {
  2518. uint32_t old_write_domain;
  2519. if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
  2520. return;
  2521. i915_gem_clflush_object(obj);
  2522. i915_gem_chipset_flush(obj->base.dev);
  2523. old_write_domain = obj->base.write_domain;
  2524. obj->base.write_domain = 0;
  2525. trace_i915_gem_object_change_domain(obj,
  2526. obj->base.read_domains,
  2527. old_write_domain);
  2528. }
  2529. /**
  2530. * Moves a single object to the GTT read, and possibly write domain.
  2531. *
  2532. * This function returns when the move is complete, including waiting on
  2533. * flushes to occur.
  2534. */
  2535. int
  2536. i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
  2537. {
  2538. drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
  2539. uint32_t old_write_domain, old_read_domains;
  2540. int ret;
  2541. /* Not valid to be called on unbound objects. */
  2542. if (obj->gtt_space == NULL)
  2543. return -EINVAL;
  2544. if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
  2545. return 0;
  2546. ret = i915_gem_object_wait_rendering(obj, !write);
  2547. if (ret)
  2548. return ret;
  2549. i915_gem_object_flush_cpu_write_domain(obj);
  2550. /* Serialise direct access to this object with the barriers for
  2551. * coherent writes from the GPU, by effectively invalidating the
  2552. * GTT domain upon first access.
  2553. */
  2554. if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
  2555. mb();
  2556. old_write_domain = obj->base.write_domain;
  2557. old_read_domains = obj->base.read_domains;
  2558. /* It should now be out of any other write domains, and we can update
  2559. * the domain values for our changes.
  2560. */
  2561. BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  2562. obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
  2563. if (write) {
  2564. obj->base.read_domains = I915_GEM_DOMAIN_GTT;
  2565. obj->base.write_domain = I915_GEM_DOMAIN_GTT;
  2566. obj->dirty = 1;
  2567. }
  2568. trace_i915_gem_object_change_domain(obj,
  2569. old_read_domains,
  2570. old_write_domain);
  2571. /* And bump the LRU for this access */
  2572. if (i915_gem_object_is_inactive(obj))
  2573. list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
  2574. return 0;
  2575. }
  2576. int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
  2577. enum i915_cache_level cache_level)
  2578. {
  2579. struct drm_device *dev = obj->base.dev;
  2580. drm_i915_private_t *dev_priv = dev->dev_private;
  2581. int ret;
  2582. if (obj->cache_level == cache_level)
  2583. return 0;
  2584. if (obj->pin_count) {
  2585. DRM_DEBUG("can not change the cache level of pinned objects\n");
  2586. return -EBUSY;
  2587. }
  2588. if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
  2589. ret = i915_gem_object_unbind(obj);
  2590. if (ret)
  2591. return ret;
  2592. }
  2593. if (obj->gtt_space) {
  2594. ret = i915_gem_object_finish_gpu(obj);
  2595. if (ret)
  2596. return ret;
  2597. i915_gem_object_finish_gtt(obj);
  2598. /* Before SandyBridge, you could not use tiling or fence
  2599. * registers with snooped memory, so relinquish any fences
  2600. * currently pointing to our region in the aperture.
  2601. */
  2602. if (INTEL_INFO(dev)->gen < 6) {
  2603. ret = i915_gem_object_put_fence(obj);
  2604. if (ret)
  2605. return ret;
  2606. }
  2607. if (obj->has_global_gtt_mapping)
  2608. i915_gem_gtt_bind_object(obj, cache_level);
  2609. if (obj->has_aliasing_ppgtt_mapping)
  2610. i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
  2611. obj, cache_level);
  2612. obj->gtt_space->color = cache_level;
  2613. }
  2614. if (cache_level == I915_CACHE_NONE) {
  2615. u32 old_read_domains, old_write_domain;
  2616. /* If we're coming from LLC cached, then we haven't
  2617. * actually been tracking whether the data is in the
  2618. * CPU cache or not, since we only allow one bit set
  2619. * in obj->write_domain and have been skipping the clflushes.
  2620. * Just set it to the CPU cache for now.
  2621. */
  2622. WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
  2623. WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
  2624. old_read_domains = obj->base.read_domains;
  2625. old_write_domain = obj->base.write_domain;
  2626. obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  2627. obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  2628. trace_i915_gem_object_change_domain(obj,
  2629. old_read_domains,
  2630. old_write_domain);
  2631. }
  2632. obj->cache_level = cache_level;
  2633. i915_gem_verify_gtt(dev);
  2634. return 0;
  2635. }
  2636. int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
  2637. struct drm_file *file)
  2638. {
  2639. struct drm_i915_gem_caching *args = data;
  2640. struct drm_i915_gem_object *obj;
  2641. int ret;
  2642. ret = i915_mutex_lock_interruptible(dev);
  2643. if (ret)
  2644. return ret;
  2645. obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  2646. if (&obj->base == NULL) {
  2647. ret = -ENOENT;
  2648. goto unlock;
  2649. }
  2650. args->caching = obj->cache_level != I915_CACHE_NONE;
  2651. drm_gem_object_unreference(&obj->base);
  2652. unlock:
  2653. mutex_unlock(&dev->struct_mutex);
  2654. return ret;
  2655. }
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		level = I915_CACHE_LLC;
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined)
{
	u32 old_read_domains, old_write_domain;
	int ret;

	if (pipelined != obj->ring) {
		ret = i915_gem_object_sync(obj, pipelined);
		if (ret)
			return ret;
	}

	/* The display engine is not coherent with the LLC cache on gen6. As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
	if (ret)
		return ret;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_object_pin(obj, alignment, true, false);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
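
/* Wait for any outstanding rendering on this object and drop its GPU read
 * domains, so it can subsequently be unbound or have its cache level changed.
 */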
int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
	int ret;

	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	/* Ensure that we invalidate the GPU's caches and TLBs. */
	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	u32 seqno = 0;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
	if (ret)
		return ret;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = __wait_seqno(ring, seqno, true, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}
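
/* Pin an object into the GTT, binding it first if necessary. If an existing
 * binding does not satisfy the requested alignment or map_and_fenceable
 * constraint, the object is unbound and rebound. Every successful pin must be
 * balanced by a call to i915_gem_object_unpin().
 */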
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    uint32_t alignment,
		    bool map_and_fenceable,
		    bool nonblocking)
{
	int ret;

	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
		return -EBUSY;

	if (obj->gtt_space != NULL) {
		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
		    (map_and_fenceable && !obj->map_and_fenceable)) {
			WARN(obj->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     obj->gtt_offset, alignment,
			     map_and_fenceable,
			     obj->map_and_fenceable);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj->gtt_space == NULL) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

		ret = i915_gem_object_bind_to_gtt(obj, alignment,
						  map_and_fenceable,
						  nonblocking);
		if (ret)
			return ret;

		if (!dev_priv->mm.aliasing_ppgtt)
			i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	if (!obj->has_global_gtt_mapping && map_and_fenceable)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	obj->pin_count++;
	obj->pin_mappable |= map_and_fenceable;

	return 0;
}

void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pin_count == 0);
	BUG_ON(obj->gtt_space == NULL);

	if (--obj->pin_count == 0)
		obj->pin_mappable = false;
}
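
/* Pin a buffer into the GTT on behalf of userspace and report its offset.
 * User pins are refcounted per object and tied to the file that took the
 * first pin.
 */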
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (obj->pin_filp != NULL && obj->pin_filp != file) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	obj->user_pin_count++;
	obj->pin_filp = file;
	if (obj->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment, true, false);
		if (ret)
			goto out;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj->gtt_offset;
out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
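
/* Release a userspace pin taken through i915_gem_pin_ioctl(); only the file
 * that pinned the buffer may unpin it.
 */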
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_filp != file) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj->user_pin_count--;
	if (obj->user_pin_count == 0) {
		obj->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
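
/* Report whether a buffer is still in use by the GPU. The low bit of
 * args->busy is the active flag; the upper bits encode the ring the object
 * was last used on.
 */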
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);

	args->busy = obj->active;
	if (obj->ring) {
		BUILD_BUG_ON(I915_NUM_RINGS > 16);
		args->busy |= intel_ring_flag(obj->ring) << 16;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_count) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	INIT_LIST_HEAD(&obj->mm_list);
	INIT_LIST_HEAD(&obj->gtt_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->exec_list);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;

	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		i915_gem_object_free(obj);
		return NULL;
	}

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	return obj;
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_gem_object_destroy(obj);

	if (obj->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	obj->pin_count = 0;
	if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		WARN_ON(i915_gem_object_unbind(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	obj->pages_pin_count = 0;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);
	i915_gem_object_release_stolen(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);
}
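
/* Quiesce the GPU for suspend or unload: wait for outstanding rendering,
 * retire requests, drop the fences and tear down the rings, then cancel the
 * retire work handler.
 */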
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	i915_gem_retire_requests(dev);

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_gem_evict_everything(dev);

	i915_gem_reset_fences(dev);

	/* Hack! Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}
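
/* On Ivybridge, rewrite the L3 log/remap registers from the saved
 * l3_parity.remap_info, temporarily disabling DOP clock gating around the
 * register writes.
 */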
void i915_gem_l3_remap(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 misccpctl;
	int i;

	if (!IS_IVYBRIDGE(dev))
		return;

	if (!dev_priv->l3_parity.remap_info)
		return;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
			DRM_DEBUG("0x%x was already programmed to %x\n",
				  GEN7_L3LOG_BASE + i, remap);
		if (remap && !dev_priv->l3_parity.remap_info[i/4])
			DRM_DEBUG_DRIVER("Clearing remapped register\n");
		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
	}

	/* Make sure all the writes land before disabling dop clock gating */
	POSTING_READ(GEN7_L3LOG_BASE);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else
		BUG();
}

static bool
intel_enable_blt(struct drm_device *dev)
{
	if (!HAS_BLT(dev))
		return false;

	/* The blitter was dysfunctional on early prototypes */
	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
		DRM_INFO("BLT not supported on this pre-production hardware;"
			 " graphics performance will be degraded.\n");
		return false;
	}

	return true;
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;

	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
		I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);

	i915_gem_l3_remap(dev);

	i915_gem_init_swizzling(dev);

	dev_priv->next_seqno = dev_priv->last_seqno = (u32)~0 - 0x1000;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (intel_enable_blt(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	/*
	 * XXX: There was some w/a described somewhere suggesting loading
	 * contexts before PPGTT.
	 */
	i915_gem_context_init(dev);
	i915_gem_init_ppgtt(dev);

	return 0;

cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
	return ret;
}

int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);
	i915_gem_init_global_gtt(dev);
	ret = i915_gem_init_hw(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		i915_gem_cleanup_aliasing_ppgtt(dev);
		return ret;
	}

	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->dri1.allow_batchbuffer = 1;
	return 0;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		intel_cleanup_ring_buffer(ring);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
}
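
/* One-time setup of GEM state at driver load: the object slab cache, the
 * memory-management lists, fence registers, the retire work handler and the
 * inactive-object shrinker.
 */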
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	dev_priv->slab =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	i915_gem_reset_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
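
/* Copy the contents of a physically contiguous object back into the object's
 * shmem pages and drop the association, flushing the chipset caches
 * afterwards.
 */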
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	i915_gem_chipset_flush(dev);

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}

int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
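
/* Shrinker callback: when asked to scan, purge inactive objects and, if that
 * is not enough, drop all reclaimable pages; then report how many pages could
 * still be freed from the unbound and bound lists.
 */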
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	int nr_to_scan = sc->nr_to_scan;
	bool unlock = true;
	int cnt;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return 0;

		unlock = false;
	}

	if (nr_to_scan) {
		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
		if (nr_to_scan > 0)
			i915_gem_shrink_all(dev_priv);
	}

	cnt = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
		if (obj->pages_pin_count == 0)
			cnt += obj->base.size >> PAGE_SHIFT;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
			cnt += obj->base.size >> PAGE_SHIFT;

	if (unlock)
		mutex_unlock(&dev->struct_mutex);
	return cnt;
}