i915_gem.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable,
						    bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

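/* Helper for the GEM ioctls: take dev->struct_mutex interruptibly, but first
 * wait for any in-progress GPU reset so callers do not race with recovery. */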
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

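/* An object counts as inactive once it is bound into the global GTT but is no
 * longer being read or written by the GPU. */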
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_ggtt_bound(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

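/* GEM object structs come from a driver-private slab cache (dev_priv->slab)
 * and are zeroed on allocation. */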
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

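/* Copy helpers for objects using bit-17 swizzling: data is moved one
 * cacheline at a time, and the GPU offset is XORed with 64 so that the two
 * 64-byte halves of each 128-byte span are swapped to match the CPU view. */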
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

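/* pread through the shmem backing store: for each backing page, try the
 * atomic kmap fastpath first and, on a fault, drop struct_mutex and retry
 * with the sleeping slow path. */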
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
		if (i915_gem_obj_ggtt_bound(obj)) {
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
			if (ret)
				return ret;
		}
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915_prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_object_pin(obj, 0, true, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

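/* pwrite counterpart of the shmem pread loop above: per-page fastpath with a
 * slowpath fallback that drops struct_mutex, plus the clflush bookkeeping
 * needed for uncached objects. */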
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
		if (i915_gem_obj_ggtt_bound(obj)) {
			ret = i915_gem_object_set_to_gtt_domain(obj, true);
			if (ret)
				return ret;
		}
	}
	/* Same trick applies to invalidating partially written cachelines
	 * before writing. */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
	    && obj->cache_level == I915_CACHE_NONE)
		needs_clflush_before = 1;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915_prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->cache_level == I915_CACHE_NONE &&
	    obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_request)
		ret = i915_add_request(ring, NULL);

	return ret;
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with the remaining time filled in the timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible, struct timespec *timeout)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now, wait_time={1,0};
	unsigned long timeout_jiffies;
	long end;
	bool wait_forever = true;
	int ret;

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	trace_i915_gem_request_wait_begin(ring, seqno);

	if (timeout != NULL) {
		wait_time = *timeout;
		wait_forever = false;
	}

	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);

	if (WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	getrawmonotonic(&before);

#define EXIT_COND \
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
	 i915_reset_in_progress(&dev_priv->gpu_error) || \
	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
	do {
		if (interruptible)
			end = wait_event_interruptible_timeout(ring->irq_queue,
							       EXIT_COND,
							       timeout_jiffies);
		else
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
			end = -EAGAIN;

		/* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
		 * gone. */
		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
		if (ret)
			end = ret;
	} while (end == 0 && wait_forever);

	getrawmonotonic(&now);

	ring->irq_put(ring);
	trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	switch (end) {
	case -EIO:
	case -EAGAIN: /* Wedged */
	case -ERESTARTSYS: /* Signal */
		return (int)end;
	case 0: /* Timeout */
		return -ETIME;
	default: /* Completed */
		WARN_ON(end < 0); /* We're not aware of other errors */
		return 0;
	}
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

  978. /**
  979. * Called when user space prepares to use an object with the CPU, either
  980. * through the mmap ioctl's mapping or a GTT mapping.
  981. */
  982. int
  983. i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
  984. struct drm_file *file)
  985. {
  986. struct drm_i915_gem_set_domain *args = data;
  987. struct drm_i915_gem_object *obj;
  988. uint32_t read_domains = args->read_domains;
  989. uint32_t write_domain = args->write_domain;
  990. int ret;
  991. /* Only handle setting domains to types used by the CPU. */
  992. if (write_domain & I915_GEM_GPU_DOMAINS)
  993. return -EINVAL;
  994. if (read_domains & I915_GEM_GPU_DOMAINS)
  995. return -EINVAL;
  996. /* Having something in the write domain implies it's in the read
  997. * domain, and only that read domain. Enforce that in the request.
  998. */
  999. if (write_domain != 0 && read_domains != write_domain)
  1000. return -EINVAL;
  1001. ret = i915_mutex_lock_interruptible(dev);
  1002. if (ret)
  1003. return ret;
  1004. obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  1005. if (&obj->base == NULL) {
  1006. ret = -ENOENT;
  1007. goto unlock;
  1008. }
  1009. /* Try to flush the object off the GPU without holding the lock.
  1010. * We will repeat the flush holding the lock in the normal manner
  1011. * to catch cases where we are gazumped.
  1012. */
  1013. ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
  1014. if (ret)
  1015. goto unref;
  1016. if (read_domains & I915_GEM_DOMAIN_GTT) {
  1017. ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
  1018. /* Silently promote "you're not bound, there was nothing to do"
  1019. * to success, since the client was just asking us to
  1020. * make sure everything was done.
  1021. */
  1022. if (ret == -EINVAL)
  1023. ret = 0;
  1024. } else {
  1025. ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
  1026. }
  1027. unref:
  1028. drm_gem_object_unreference(&obj->base);
  1029. unlock:
  1030. mutex_unlock(&dev->struct_mutex);
  1031. return ret;
  1032. }
  1033. /**
  1034. * Called when user space has done writes to this buffer
  1035. */
  1036. int
  1037. i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
  1038. struct drm_file *file)
  1039. {
  1040. struct drm_i915_gem_sw_finish *args = data;
  1041. struct drm_i915_gem_object *obj;
  1042. int ret = 0;
  1043. ret = i915_mutex_lock_interruptible(dev);
  1044. if (ret)
  1045. return ret;
  1046. obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  1047. if (&obj->base == NULL) {
  1048. ret = -ENOENT;
  1049. goto unlock;
  1050. }
  1051. /* Pinned buffers may be scanout, so flush the cache */
  1052. if (obj->pin_count)
  1053. i915_gem_object_flush_cpu_write_domain(obj);
  1054. drm_gem_object_unreference(&obj->base);
  1055. unlock:
  1056. mutex_unlock(&dev->struct_mutex);
  1057. return ret;
  1058. }
  1059. /**
  1060. * Maps the contents of an object, returning the address it is mapped
  1061. * into.
  1062. *
  1063. * While the mapping holds a reference on the contents of the object, it doesn't
  1064. * imply a ref on the object itself.
  1065. */
  1066. int
  1067. i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
  1068. struct drm_file *file)
  1069. {
  1070. struct drm_i915_gem_mmap *args = data;
  1071. struct drm_gem_object *obj;
  1072. unsigned long addr;
  1073. obj = drm_gem_object_lookup(dev, file, args->handle);
  1074. if (obj == NULL)
  1075. return -ENOENT;
  1076. /* prime objects have no backing filp to GEM mmap
  1077. * pages from.
  1078. */
  1079. if (!obj->filp) {
  1080. drm_gem_object_unreference_unlocked(obj);
  1081. return -EINVAL;
  1082. }
  1083. addr = vm_mmap(obj->filp, 0, args->size,
  1084. PROT_READ | PROT_WRITE, MAP_SHARED,
  1085. args->offset);
  1086. drm_gem_object_unreference_unlocked(obj);
  1087. if (IS_ERR((void *)addr))
  1088. return addr;
  1089. args->addr_ptr = (uint64_t) addr;
  1090. return 0;
  1091. }
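/*
 * Illustrative userspace usage (a hedged sketch, not part of this file):
 * obtaining a CPU mapping of the object's shmem backing store via the
 * mmap ioctl handled above. Assumes libdrm's drmIoctl(), a valid GEM
 * handle "handle" and its size "size".
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.size = size,
 *	};
 *	void *ptr = NULL;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 *
 * As noted above, this fails with -EINVAL for prime-imported objects,
 * which have no backing filp to map pages from.
 */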
  1092. /**
  1093. * i915_gem_fault - fault a page into the GTT
  1094. * vma: VMA in question
  1095. * vmf: fault info
  1096. *
1097. * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
  1098. * from userspace. The fault handler takes care of binding the object to
  1099. * the GTT (if needed), allocating and programming a fence register (again,
  1100. * only if needed based on whether the old reg is still valid or the object
  1101. * is tiled) and inserting a new PTE into the faulting process.
  1102. *
  1103. * Note that the faulting process may involve evicting existing objects
  1104. * from the GTT and/or fence registers to make room. So performance may
  1105. * suffer if the GTT working set is large or there are few fence registers
  1106. * left.
  1107. */
  1108. int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1109. {
  1110. struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
  1111. struct drm_device *dev = obj->base.dev;
  1112. drm_i915_private_t *dev_priv = dev->dev_private;
  1113. pgoff_t page_offset;
  1114. unsigned long pfn;
  1115. int ret = 0;
  1116. bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
  1117. /* We don't use vmf->pgoff since that has the fake offset */
  1118. page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
  1119. PAGE_SHIFT;
  1120. ret = i915_mutex_lock_interruptible(dev);
  1121. if (ret)
  1122. goto out;
  1123. trace_i915_gem_object_fault(obj, page_offset, true, write);
  1124. /* Access to snoopable pages through the GTT is incoherent. */
  1125. if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
  1126. ret = -EINVAL;
  1127. goto unlock;
  1128. }
  1129. /* Now bind it into the GTT if needed */
  1130. ret = i915_gem_object_pin(obj, 0, true, false);
  1131. if (ret)
  1132. goto unlock;
  1133. ret = i915_gem_object_set_to_gtt_domain(obj, write);
  1134. if (ret)
  1135. goto unpin;
  1136. ret = i915_gem_object_get_fence(obj);
  1137. if (ret)
  1138. goto unpin;
  1139. obj->fault_mappable = true;
  1140. pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
  1141. pfn >>= PAGE_SHIFT;
  1142. pfn += page_offset;
  1143. /* Finally, remap it using the new GTT offset */
  1144. ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
  1145. unpin:
  1146. i915_gem_object_unpin(obj);
  1147. unlock:
  1148. mutex_unlock(&dev->struct_mutex);
  1149. out:
  1150. switch (ret) {
  1151. case -EIO:
  1152. /* If this -EIO is due to a gpu hang, give the reset code a
  1153. * chance to clean up the mess. Otherwise return the proper
  1154. * SIGBUS. */
  1155. if (i915_terminally_wedged(&dev_priv->gpu_error))
  1156. return VM_FAULT_SIGBUS;
  1157. case -EAGAIN:
  1158. /* Give the error handler a chance to run and move the
  1159. * objects off the GPU active list. Next time we service the
  1160. * fault, we should be able to transition the page into the
  1161. * GTT without touching the GPU (and so avoid further
1162. * EIO/EAGAIN). If the GPU is wedged, then there is no issue
  1163. * with coherency, just lost writes.
  1164. */
  1165. set_need_resched();
  1166. case 0:
  1167. case -ERESTARTSYS:
  1168. case -EINTR:
  1169. case -EBUSY:
  1170. /*
  1171. * EBUSY is ok: this just means that another thread
  1172. * already did the job.
  1173. */
  1174. return VM_FAULT_NOPAGE;
  1175. case -ENOMEM:
  1176. return VM_FAULT_OOM;
  1177. case -ENOSPC:
  1178. return VM_FAULT_SIGBUS;
  1179. default:
  1180. WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
  1181. return VM_FAULT_SIGBUS;
  1182. }
  1183. }
  1184. /**
  1185. * i915_gem_release_mmap - remove physical page mappings
  1186. * @obj: obj in question
  1187. *
  1188. * Preserve the reservation of the mmapping with the DRM core code, but
  1189. * relinquish ownership of the pages back to the system.
  1190. *
  1191. * It is vital that we remove the page mapping if we have mapped a tiled
  1192. * object through the GTT and then lose the fence register due to
  1193. * resource pressure. Similarly if the object has been moved out of the
1194. * aperture, then pages mapped into userspace must be revoked. Removing the
  1195. * mapping will then trigger a page fault on the next user access, allowing
  1196. * fixup by i915_gem_fault().
  1197. */
  1198. void
  1199. i915_gem_release_mmap(struct drm_i915_gem_object *obj)
  1200. {
  1201. if (!obj->fault_mappable)
  1202. return;
  1203. if (obj->base.dev->dev_mapping)
  1204. unmap_mapping_range(obj->base.dev->dev_mapping,
  1205. (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
  1206. obj->base.size, 1);
  1207. obj->fault_mappable = false;
  1208. }
  1209. uint32_t
  1210. i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
  1211. {
  1212. uint32_t gtt_size;
  1213. if (INTEL_INFO(dev)->gen >= 4 ||
  1214. tiling_mode == I915_TILING_NONE)
  1215. return size;
  1216. /* Previous chips need a power-of-two fence region when tiling */
  1217. if (INTEL_INFO(dev)->gen == 3)
  1218. gtt_size = 1024*1024;
  1219. else
  1220. gtt_size = 512*1024;
  1221. while (gtt_size < size)
  1222. gtt_size <<= 1;
  1223. return gtt_size;
  1224. }
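/*
 * Worked example (illustrative numbers only): a 700KB Y-tiled object on
 * gen3 starts from the 1MB minimum and already fits, so the fence region
 * is 1MB; on gen2 the same object starts at 512KB and doubles once to
 * 1MB. A 1.5MB tiled object would double up to 2MB on either. Untiled
 * objects and gen4+ simply use the object size.
 */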
  1225. /**
  1226. * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  1227. * @obj: object to check
  1228. *
  1229. * Return the required GTT alignment for an object, taking into account
  1230. * potential fence register mapping.
  1231. */
  1232. uint32_t
  1233. i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
  1234. int tiling_mode, bool fenced)
  1235. {
  1236. /*
  1237. * Minimum alignment is 4k (GTT page size), but might be greater
  1238. * if a fence register is needed for the object.
  1239. */
  1240. if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
  1241. tiling_mode == I915_TILING_NONE)
  1242. return 4096;
  1243. /*
  1244. * Previous chips need to be aligned to the size of the smallest
  1245. * fence register that can contain the object.
  1246. */
  1247. return i915_gem_get_gtt_size(dev, size, tiling_mode);
  1248. }
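/*
 * Worked example (illustrative): the 700KB Y-tiled gen3 object from the
 * sizing example above therefore needs 1MB GTT alignment when it will be
 * accessed through a fence, while on gen4+ (or when untiled) the minimum
 * 4KB page alignment suffices.
 */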
  1249. static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
  1250. {
  1251. struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1252. int ret;
  1253. if (obj->base.map_list.map)
  1254. return 0;
  1255. dev_priv->mm.shrinker_no_lock_stealing = true;
  1256. ret = drm_gem_create_mmap_offset(&obj->base);
  1257. if (ret != -ENOSPC)
  1258. goto out;
  1259. /* Badly fragmented mmap space? The only way we can recover
  1260. * space is by destroying unwanted objects. We can't randomly release
  1261. * mmap_offsets as userspace expects them to be persistent for the
1262. * lifetime of the objects. The closest we can do is to release the
1263. * offsets on purgeable objects by truncating them and marking them purged,
  1264. * which prevents userspace from ever using that object again.
  1265. */
  1266. i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
  1267. ret = drm_gem_create_mmap_offset(&obj->base);
  1268. if (ret != -ENOSPC)
  1269. goto out;
  1270. i915_gem_shrink_all(dev_priv);
  1271. ret = drm_gem_create_mmap_offset(&obj->base);
  1272. out:
  1273. dev_priv->mm.shrinker_no_lock_stealing = false;
  1274. return ret;
  1275. }
  1276. static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
  1277. {
  1278. if (!obj->base.map_list.map)
  1279. return;
  1280. drm_gem_free_mmap_offset(&obj->base);
  1281. }
  1282. int
  1283. i915_gem_mmap_gtt(struct drm_file *file,
  1284. struct drm_device *dev,
  1285. uint32_t handle,
  1286. uint64_t *offset)
  1287. {
  1288. struct drm_i915_private *dev_priv = dev->dev_private;
  1289. struct drm_i915_gem_object *obj;
  1290. int ret;
  1291. ret = i915_mutex_lock_interruptible(dev);
  1292. if (ret)
  1293. return ret;
  1294. obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
  1295. if (&obj->base == NULL) {
  1296. ret = -ENOENT;
  1297. goto unlock;
  1298. }
  1299. if (obj->base.size > dev_priv->gtt.mappable_end) {
  1300. ret = -E2BIG;
  1301. goto out;
  1302. }
  1303. if (obj->madv != I915_MADV_WILLNEED) {
  1304. DRM_ERROR("Attempting to mmap a purgeable buffer\n");
  1305. ret = -EINVAL;
  1306. goto out;
  1307. }
  1308. ret = i915_gem_object_create_mmap_offset(obj);
  1309. if (ret)
  1310. goto out;
  1311. *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
  1312. out:
  1313. drm_gem_object_unreference(&obj->base);
  1314. unlock:
  1315. mutex_unlock(&dev->struct_mutex);
  1316. return ret;
  1317. }
  1318. /**
  1319. * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  1320. * @dev: DRM device
  1321. * @data: GTT mapping ioctl data
  1322. * @file: GEM object info
  1323. *
  1324. * Simply returns the fake offset to userspace so it can mmap it.
  1325. * The mmap call will end up in drm_gem_mmap(), which will set things
  1326. * up so we can get faults in the handler above.
  1327. *
  1328. * The fault handler will take care of binding the object into the GTT
  1329. * (since it may have been evicted to make room for something), allocating
  1330. * a fence register, and mapping the appropriate aperture address into
  1331. * userspace.
  1332. */
  1333. int
  1334. i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
  1335. struct drm_file *file)
  1336. {
  1337. struct drm_i915_gem_mmap_gtt *args = data;
  1338. return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
  1339. }
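/*
 * Illustrative userspace usage (a hedged sketch, not part of this file):
 * GTT mapping is a two-step dance - fetch the fake offset with the ioctl
 * above, then mmap() the DRM fd at that offset so that faults are
 * serviced by i915_gem_fault(). Assumes libdrm's drmIoctl(), a valid GEM
 * handle "handle" and its size "size".
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */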
  1340. /* Immediately discard the backing storage */
  1341. static void
  1342. i915_gem_object_truncate(struct drm_i915_gem_object *obj)
  1343. {
  1344. struct inode *inode;
  1345. i915_gem_object_free_mmap_offset(obj);
  1346. if (obj->base.filp == NULL)
  1347. return;
1348. * Our goal here is to return as much of the memory as possible
1349. * back to the system, as we are called from OOM.
  1350. * To do this we must instruct the shmfs to drop all of its
  1351. * backing pages, *now*.
  1352. */
  1353. inode = file_inode(obj->base.filp);
  1354. shmem_truncate_range(inode, 0, (loff_t)-1);
  1355. obj->madv = __I915_MADV_PURGED;
  1356. }
  1357. static inline int
  1358. i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
  1359. {
  1360. return obj->madv == I915_MADV_DONTNEED;
  1361. }
  1362. static void
  1363. i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
  1364. {
  1365. struct sg_page_iter sg_iter;
  1366. int ret;
  1367. BUG_ON(obj->madv == __I915_MADV_PURGED);
  1368. ret = i915_gem_object_set_to_cpu_domain(obj, true);
  1369. if (ret) {
  1370. /* In the event of a disaster, abandon all caches and
  1371. * hope for the best.
  1372. */
  1373. WARN_ON(ret != -EIO);
  1374. i915_gem_clflush_object(obj);
  1375. obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  1376. }
  1377. if (i915_gem_object_needs_bit17_swizzle(obj))
  1378. i915_gem_object_save_bit_17_swizzle(obj);
  1379. if (obj->madv == I915_MADV_DONTNEED)
  1380. obj->dirty = 0;
  1381. for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
  1382. struct page *page = sg_page_iter_page(&sg_iter);
  1383. if (obj->dirty)
  1384. set_page_dirty(page);
  1385. if (obj->madv == I915_MADV_WILLNEED)
  1386. mark_page_accessed(page);
  1387. page_cache_release(page);
  1388. }
  1389. obj->dirty = 0;
  1390. sg_free_table(obj->pages);
  1391. kfree(obj->pages);
  1392. }
  1393. int
  1394. i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
  1395. {
  1396. const struct drm_i915_gem_object_ops *ops = obj->ops;
  1397. if (obj->pages == NULL)
  1398. return 0;
  1399. BUG_ON(i915_gem_obj_ggtt_bound(obj));
  1400. if (obj->pages_pin_count)
  1401. return -EBUSY;
  1402. /* ->put_pages might need to allocate memory for the bit17 swizzle
  1403. * array, hence protect them from being reaped by removing them from gtt
  1404. * lists early. */
  1405. list_del(&obj->global_list);
  1406. ops->put_pages(obj);
  1407. obj->pages = NULL;
  1408. if (i915_gem_object_is_purgeable(obj))
  1409. i915_gem_object_truncate(obj);
  1410. return 0;
  1411. }
  1412. static long
  1413. __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
  1414. bool purgeable_only)
  1415. {
  1416. struct drm_i915_gem_object *obj, *next;
  1417. struct i915_address_space *vm = &dev_priv->gtt.base;
  1418. long count = 0;
  1419. list_for_each_entry_safe(obj, next,
  1420. &dev_priv->mm.unbound_list,
  1421. global_list) {
  1422. if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
  1423. i915_gem_object_put_pages(obj) == 0) {
  1424. count += obj->base.size >> PAGE_SHIFT;
  1425. if (count >= target)
  1426. return count;
  1427. }
  1428. }
  1429. list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
  1430. if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
  1431. i915_gem_object_unbind(obj) == 0 &&
  1432. i915_gem_object_put_pages(obj) == 0) {
  1433. count += obj->base.size >> PAGE_SHIFT;
  1434. if (count >= target)
  1435. return count;
  1436. }
  1437. }
  1438. return count;
  1439. }
  1440. static long
  1441. i915_gem_purge(struct drm_i915_private *dev_priv, long target)
  1442. {
  1443. return __i915_gem_shrink(dev_priv, target, true);
  1444. }
  1445. static void
  1446. i915_gem_shrink_all(struct drm_i915_private *dev_priv)
  1447. {
  1448. struct drm_i915_gem_object *obj, *next;
  1449. i915_gem_evict_everything(dev_priv->dev);
  1450. list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
  1451. global_list)
  1452. i915_gem_object_put_pages(obj);
  1453. }
  1454. static int
  1455. i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
  1456. {
  1457. struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1458. int page_count, i;
  1459. struct address_space *mapping;
  1460. struct sg_table *st;
  1461. struct scatterlist *sg;
  1462. struct sg_page_iter sg_iter;
  1463. struct page *page;
  1464. unsigned long last_pfn = 0; /* suppress gcc warning */
  1465. gfp_t gfp;
  1466. /* Assert that the object is not currently in any GPU domain. As it
  1467. * wasn't in the GTT, there shouldn't be any way it could have been in
  1468. * a GPU cache
  1469. */
  1470. BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
  1471. BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
  1472. st = kmalloc(sizeof(*st), GFP_KERNEL);
  1473. if (st == NULL)
  1474. return -ENOMEM;
  1475. page_count = obj->base.size / PAGE_SIZE;
  1476. if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
  1477. sg_free_table(st);
  1478. kfree(st);
  1479. return -ENOMEM;
  1480. }
  1481. /* Get the list of pages out of our struct file. They'll be pinned
  1482. * at this point until we release them.
  1483. *
  1484. * Fail silently without starting the shrinker
  1485. */
  1486. mapping = file_inode(obj->base.filp)->i_mapping;
  1487. gfp = mapping_gfp_mask(mapping);
  1488. gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
  1489. gfp &= ~(__GFP_IO | __GFP_WAIT);
  1490. sg = st->sgl;
  1491. st->nents = 0;
  1492. for (i = 0; i < page_count; i++) {
  1493. page = shmem_read_mapping_page_gfp(mapping, i, gfp);
  1494. if (IS_ERR(page)) {
  1495. i915_gem_purge(dev_priv, page_count);
  1496. page = shmem_read_mapping_page_gfp(mapping, i, gfp);
  1497. }
  1498. if (IS_ERR(page)) {
  1499. /* We've tried hard to allocate the memory by reaping
  1500. * our own buffer, now let the real VM do its job and
  1501. * go down in flames if truly OOM.
  1502. */
  1503. gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
  1504. gfp |= __GFP_IO | __GFP_WAIT;
  1505. i915_gem_shrink_all(dev_priv);
  1506. page = shmem_read_mapping_page_gfp(mapping, i, gfp);
  1507. if (IS_ERR(page))
  1508. goto err_pages;
  1509. gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
  1510. gfp &= ~(__GFP_IO | __GFP_WAIT);
  1511. }
  1512. #ifdef CONFIG_SWIOTLB
  1513. if (swiotlb_nr_tbl()) {
  1514. st->nents++;
  1515. sg_set_page(sg, page, PAGE_SIZE, 0);
  1516. sg = sg_next(sg);
  1517. continue;
  1518. }
  1519. #endif
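/* Coalesce physically contiguous pages into a single sg entry; start a
 * new entry whenever the pfn is not adjacent to the previous one.
 */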
  1520. if (!i || page_to_pfn(page) != last_pfn + 1) {
  1521. if (i)
  1522. sg = sg_next(sg);
  1523. st->nents++;
  1524. sg_set_page(sg, page, PAGE_SIZE, 0);
  1525. } else {
  1526. sg->length += PAGE_SIZE;
  1527. }
  1528. last_pfn = page_to_pfn(page);
  1529. }
  1530. #ifdef CONFIG_SWIOTLB
  1531. if (!swiotlb_nr_tbl())
  1532. #endif
  1533. sg_mark_end(sg);
  1534. obj->pages = st;
  1535. if (i915_gem_object_needs_bit17_swizzle(obj))
  1536. i915_gem_object_do_bit_17_swizzle(obj);
  1537. return 0;
  1538. err_pages:
  1539. sg_mark_end(sg);
  1540. for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
  1541. page_cache_release(sg_page_iter_page(&sg_iter));
  1542. sg_free_table(st);
  1543. kfree(st);
  1544. return PTR_ERR(page);
  1545. }
  1546. /* Ensure that the associated pages are gathered from the backing storage
  1547. * and pinned into our object. i915_gem_object_get_pages() may be called
  1548. * multiple times before they are released by a single call to
  1549. * i915_gem_object_put_pages() - once the pages are no longer referenced
  1550. * either as a result of memory pressure (reaping pages under the shrinker)
  1551. * or as the object is itself released.
  1552. */
  1553. int
  1554. i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
  1555. {
  1556. struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1557. const struct drm_i915_gem_object_ops *ops = obj->ops;
  1558. int ret;
  1559. if (obj->pages)
  1560. return 0;
  1561. if (obj->madv != I915_MADV_WILLNEED) {
  1562. DRM_ERROR("Attempting to obtain a purgeable object\n");
  1563. return -EINVAL;
  1564. }
  1565. BUG_ON(obj->pages_pin_count);
  1566. ret = ops->get_pages(obj);
  1567. if (ret)
  1568. return ret;
  1569. list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
  1570. return 0;
  1571. }
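/*
 * Typical in-driver pairing (a hedged sketch of the pattern used later in
 * this file by i915_gem_object_bind_to_gtt(), not a new API):
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... use obj->pages ...
 *	i915_gem_object_unpin_pages(obj);
 *
 * A subsequent i915_gem_object_put_pages() only succeeds once the pin
 * count has dropped to zero.
 */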
  1572. void
  1573. i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
  1574. struct intel_ring_buffer *ring)
  1575. {
  1576. struct drm_device *dev = obj->base.dev;
  1577. struct drm_i915_private *dev_priv = dev->dev_private;
  1578. struct i915_address_space *vm = &dev_priv->gtt.base;
  1579. u32 seqno = intel_ring_get_seqno(ring);
  1580. BUG_ON(ring == NULL);
  1581. obj->ring = ring;
  1582. /* Add a reference if we're newly entering the active list. */
  1583. if (!obj->active) {
  1584. drm_gem_object_reference(&obj->base);
  1585. obj->active = 1;
  1586. }
  1587. /* Move from whatever list we were on to the tail of execution. */
  1588. list_move_tail(&obj->mm_list, &vm->active_list);
  1589. list_move_tail(&obj->ring_list, &ring->active_list);
  1590. obj->last_read_seqno = seqno;
  1591. if (obj->fenced_gpu_access) {
  1592. obj->last_fenced_seqno = seqno;
  1593. /* Bump MRU to take account of the delayed flush */
  1594. if (obj->fence_reg != I915_FENCE_REG_NONE) {
  1595. struct drm_i915_fence_reg *reg;
  1596. reg = &dev_priv->fence_regs[obj->fence_reg];
  1597. list_move_tail(&reg->lru_list,
  1598. &dev_priv->mm.fence_list);
  1599. }
  1600. }
  1601. }
  1602. static void
  1603. i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
  1604. {
  1605. struct drm_device *dev = obj->base.dev;
  1606. struct drm_i915_private *dev_priv = dev->dev_private;
  1607. struct i915_address_space *vm = &dev_priv->gtt.base;
  1608. BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
  1609. BUG_ON(!obj->active);
  1610. list_move_tail(&obj->mm_list, &vm->inactive_list);
  1611. list_del_init(&obj->ring_list);
  1612. obj->ring = NULL;
  1613. obj->last_read_seqno = 0;
  1614. obj->last_write_seqno = 0;
  1615. obj->base.write_domain = 0;
  1616. obj->last_fenced_seqno = 0;
  1617. obj->fenced_gpu_access = false;
  1618. obj->active = 0;
  1619. drm_gem_object_unreference(&obj->base);
  1620. WARN_ON(i915_verify_lists(dev));
  1621. }
  1622. static int
  1623. i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
  1624. {
  1625. struct drm_i915_private *dev_priv = dev->dev_private;
  1626. struct intel_ring_buffer *ring;
  1627. int ret, i, j;
  1628. /* Carefully retire all requests without writing to the rings */
  1629. for_each_ring(ring, dev_priv, i) {
  1630. ret = intel_ring_idle(ring);
  1631. if (ret)
  1632. return ret;
  1633. }
  1634. i915_gem_retire_requests(dev);
  1635. /* Finally reset hw state */
  1636. for_each_ring(ring, dev_priv, i) {
  1637. intel_ring_init_seqno(ring, seqno);
  1638. for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
  1639. ring->sync_seqno[j] = 0;
  1640. }
  1641. return 0;
  1642. }
  1643. int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
  1644. {
  1645. struct drm_i915_private *dev_priv = dev->dev_private;
  1646. int ret;
  1647. if (seqno == 0)
  1648. return -EINVAL;
1649. /* The HWS page seqno needs to be set to less than what we
1650. * will inject into the ring.
1651. */
  1652. ret = i915_gem_init_seqno(dev, seqno - 1);
  1653. if (ret)
  1654. return ret;
  1655. /* Carefully set the last_seqno value so that wrap
  1656. * detection still works
  1657. */
  1658. dev_priv->next_seqno = seqno;
  1659. dev_priv->last_seqno = seqno - 1;
  1660. if (dev_priv->last_seqno == 0)
  1661. dev_priv->last_seqno--;
  1662. return 0;
  1663. }
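/*
 * Worked example (a hedged reading of the code above): i915_gem_set_seqno(dev, 1)
 * initialises the rings to seqno 0, then sets next_seqno = 1 and
 * last_seqno = 0; because a last_seqno of 0 would confuse the wrap
 * detection, it is decremented to 0xffffffff so that the next request is
 * still seen as newer.
 */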
  1664. int
  1665. i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  1666. {
  1667. struct drm_i915_private *dev_priv = dev->dev_private;
  1668. /* reserve 0 for non-seqno */
  1669. if (dev_priv->next_seqno == 0) {
  1670. int ret = i915_gem_init_seqno(dev, 0);
  1671. if (ret)
  1672. return ret;
  1673. dev_priv->next_seqno = 1;
  1674. }
  1675. *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
  1676. return 0;
  1677. }
  1678. int __i915_add_request(struct intel_ring_buffer *ring,
  1679. struct drm_file *file,
  1680. struct drm_i915_gem_object *obj,
  1681. u32 *out_seqno)
  1682. {
  1683. drm_i915_private_t *dev_priv = ring->dev->dev_private;
  1684. struct drm_i915_gem_request *request;
  1685. u32 request_ring_position, request_start;
  1686. int was_empty;
  1687. int ret;
  1688. request_start = intel_ring_get_tail(ring);
  1689. /*
  1690. * Emit any outstanding flushes - execbuf can fail to emit the flush
  1691. * after having emitted the batchbuffer command. Hence we need to fix
  1692. * things up similar to emitting the lazy request. The difference here
  1693. * is that the flush _must_ happen before the next request, no matter
  1694. * what.
  1695. */
  1696. ret = intel_ring_flush_all_caches(ring);
  1697. if (ret)
  1698. return ret;
  1699. request = kmalloc(sizeof(*request), GFP_KERNEL);
  1700. if (request == NULL)
  1701. return -ENOMEM;
  1702. /* Record the position of the start of the request so that
  1703. * should we detect the updated seqno part-way through the
  1704. * GPU processing the request, we never over-estimate the
  1705. * position of the head.
  1706. */
  1707. request_ring_position = intel_ring_get_tail(ring);
  1708. ret = ring->add_request(ring);
  1709. if (ret) {
  1710. kfree(request);
  1711. return ret;
  1712. }
  1713. request->seqno = intel_ring_get_seqno(ring);
  1714. request->ring = ring;
  1715. request->head = request_start;
  1716. request->tail = request_ring_position;
  1717. request->ctx = ring->last_context;
  1718. request->batch_obj = obj;
  1719. /* Whilst this request exists, batch_obj will be on the
  1720. * active_list, and so will hold the active reference. Only when this
1721. * request is retired will the batch_obj be moved onto the
  1722. * inactive_list and lose its active reference. Hence we do not need
  1723. * to explicitly hold another reference here.
  1724. */
  1725. if (request->ctx)
  1726. i915_gem_context_reference(request->ctx);
  1727. request->emitted_jiffies = jiffies;
  1728. was_empty = list_empty(&ring->request_list);
  1729. list_add_tail(&request->list, &ring->request_list);
  1730. request->file_priv = NULL;
  1731. if (file) {
  1732. struct drm_i915_file_private *file_priv = file->driver_priv;
  1733. spin_lock(&file_priv->mm.lock);
  1734. request->file_priv = file_priv;
  1735. list_add_tail(&request->client_list,
  1736. &file_priv->mm.request_list);
  1737. spin_unlock(&file_priv->mm.lock);
  1738. }
  1739. trace_i915_gem_request_add(ring, request->seqno);
  1740. ring->outstanding_lazy_request = 0;
  1741. if (!dev_priv->ums.mm_suspended) {
  1742. i915_queue_hangcheck(ring->dev);
  1743. if (was_empty) {
  1744. queue_delayed_work(dev_priv->wq,
  1745. &dev_priv->mm.retire_work,
  1746. round_jiffies_up_relative(HZ));
  1747. intel_mark_busy(dev_priv->dev);
  1748. }
  1749. }
  1750. if (out_seqno)
  1751. *out_seqno = request->seqno;
  1752. return 0;
  1753. }
  1754. static inline void
  1755. i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
  1756. {
  1757. struct drm_i915_file_private *file_priv = request->file_priv;
  1758. if (!file_priv)
  1759. return;
  1760. spin_lock(&file_priv->mm.lock);
  1761. if (request->file_priv) {
  1762. list_del(&request->client_list);
  1763. request->file_priv = NULL;
  1764. }
  1765. spin_unlock(&file_priv->mm.lock);
  1766. }
  1767. static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
  1768. {
  1769. if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
  1770. acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
  1771. return true;
  1772. return false;
  1773. }
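/* A request's [head, tail) range may wrap around the end of the ring
 * buffer, so when request_start > request_end the ACTHD value counts as
 * inside if it lies on either side of the wrap point.
 */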
  1774. static bool i915_head_inside_request(const u32 acthd_unmasked,
  1775. const u32 request_start,
  1776. const u32 request_end)
  1777. {
  1778. const u32 acthd = acthd_unmasked & HEAD_ADDR;
  1779. if (request_start < request_end) {
  1780. if (acthd >= request_start && acthd < request_end)
  1781. return true;
  1782. } else if (request_start > request_end) {
  1783. if (acthd >= request_start || acthd < request_end)
  1784. return true;
  1785. }
  1786. return false;
  1787. }
  1788. static bool i915_request_guilty(struct drm_i915_gem_request *request,
  1789. const u32 acthd, bool *inside)
  1790. {
1791. /* There is a possibility that the unmasked head address, which
1792. * points inside the ring, matches the batch_obj address range.
1793. * However, this is extremely unlikely.
  1794. */
  1795. if (request->batch_obj) {
  1796. if (i915_head_inside_object(acthd, request->batch_obj)) {
  1797. *inside = true;
  1798. return true;
  1799. }
  1800. }
  1801. if (i915_head_inside_request(acthd, request->head, request->tail)) {
  1802. *inside = false;
  1803. return true;
  1804. }
  1805. return false;
  1806. }
  1807. static void i915_set_reset_status(struct intel_ring_buffer *ring,
  1808. struct drm_i915_gem_request *request,
  1809. u32 acthd)
  1810. {
  1811. struct i915_ctx_hang_stats *hs = NULL;
  1812. bool inside, guilty;
  1813. /* Innocent until proven guilty */
  1814. guilty = false;
  1815. if (ring->hangcheck.action != wait &&
  1816. i915_request_guilty(request, acthd, &inside)) {
  1817. DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
  1818. ring->name,
  1819. inside ? "inside" : "flushing",
  1820. request->batch_obj ?
  1821. i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
  1822. request->ctx ? request->ctx->id : 0,
  1823. acthd);
  1824. guilty = true;
  1825. }
  1826. /* If contexts are disabled or this is the default context, use
1827. * file_priv->hang_stats
  1828. */
  1829. if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
  1830. hs = &request->ctx->hang_stats;
  1831. else if (request->file_priv)
  1832. hs = &request->file_priv->hang_stats;
  1833. if (hs) {
  1834. if (guilty)
  1835. hs->batch_active++;
  1836. else
  1837. hs->batch_pending++;
  1838. }
  1839. }
  1840. static void i915_gem_free_request(struct drm_i915_gem_request *request)
  1841. {
  1842. list_del(&request->list);
  1843. i915_gem_request_remove_from_client(request);
  1844. if (request->ctx)
  1845. i915_gem_context_unreference(request->ctx);
  1846. kfree(request);
  1847. }
  1848. static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
  1849. struct intel_ring_buffer *ring)
  1850. {
  1851. u32 completed_seqno;
  1852. u32 acthd;
  1853. acthd = intel_ring_get_active_head(ring);
  1854. completed_seqno = ring->get_seqno(ring, false);
  1855. while (!list_empty(&ring->request_list)) {
  1856. struct drm_i915_gem_request *request;
  1857. request = list_first_entry(&ring->request_list,
  1858. struct drm_i915_gem_request,
  1859. list);
  1860. if (request->seqno > completed_seqno)
  1861. i915_set_reset_status(ring, request, acthd);
  1862. i915_gem_free_request(request);
  1863. }
  1864. while (!list_empty(&ring->active_list)) {
  1865. struct drm_i915_gem_object *obj;
  1866. obj = list_first_entry(&ring->active_list,
  1867. struct drm_i915_gem_object,
  1868. ring_list);
  1869. i915_gem_object_move_to_inactive(obj);
  1870. }
  1871. }
  1872. static void i915_gem_reset_fences(struct drm_device *dev)
  1873. {
  1874. struct drm_i915_private *dev_priv = dev->dev_private;
  1875. int i;
  1876. for (i = 0; i < dev_priv->num_fence_regs; i++) {
  1877. struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
  1878. if (reg->obj)
  1879. i915_gem_object_fence_lost(reg->obj);
  1880. i915_gem_write_fence(dev, i, NULL);
  1881. reg->pin_count = 0;
  1882. reg->obj = NULL;
  1883. INIT_LIST_HEAD(&reg->lru_list);
  1884. }
  1885. INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  1886. }
  1887. void i915_gem_reset(struct drm_device *dev)
  1888. {
  1889. struct drm_i915_private *dev_priv = dev->dev_private;
  1890. struct i915_address_space *vm = &dev_priv->gtt.base;
  1891. struct drm_i915_gem_object *obj;
  1892. struct intel_ring_buffer *ring;
  1893. int i;
  1894. for_each_ring(ring, dev_priv, i)
  1895. i915_gem_reset_ring_lists(dev_priv, ring);
  1896. /* Move everything out of the GPU domains to ensure we do any
  1897. * necessary invalidation upon reuse.
  1898. */
  1899. list_for_each_entry(obj, &vm->inactive_list, mm_list)
  1900. obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
  1901. /* The fence registers are invalidated so clear them out */
  1902. i915_gem_reset_fences(dev);
  1903. }
  1904. /**
  1905. * This function clears the request list as sequence numbers are passed.
  1906. */
  1907. void
  1908. i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
  1909. {
  1910. uint32_t seqno;
  1911. if (list_empty(&ring->request_list))
  1912. return;
  1913. WARN_ON(i915_verify_lists(ring->dev));
  1914. seqno = ring->get_seqno(ring, true);
  1915. while (!list_empty(&ring->request_list)) {
  1916. struct drm_i915_gem_request *request;
  1917. request = list_first_entry(&ring->request_list,
  1918. struct drm_i915_gem_request,
  1919. list);
  1920. if (!i915_seqno_passed(seqno, request->seqno))
  1921. break;
  1922. trace_i915_gem_request_retire(ring, request->seqno);
  1923. /* We know the GPU must have read the request to have
  1924. * sent us the seqno + interrupt, so use the position
  1925. * of tail of the request to update the last known position
  1926. * of the GPU head.
  1927. */
  1928. ring->last_retired_head = request->tail;
  1929. i915_gem_free_request(request);
  1930. }
  1931. /* Move any buffers on the active list that are no longer referenced
  1932. * by the ringbuffer to the flushing/inactive lists as appropriate.
  1933. */
  1934. while (!list_empty(&ring->active_list)) {
  1935. struct drm_i915_gem_object *obj;
  1936. obj = list_first_entry(&ring->active_list,
  1937. struct drm_i915_gem_object,
  1938. ring_list);
  1939. if (!i915_seqno_passed(seqno, obj->last_read_seqno))
  1940. break;
  1941. i915_gem_object_move_to_inactive(obj);
  1942. }
  1943. if (unlikely(ring->trace_irq_seqno &&
  1944. i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
  1945. ring->irq_put(ring);
  1946. ring->trace_irq_seqno = 0;
  1947. }
  1948. WARN_ON(i915_verify_lists(ring->dev));
  1949. }
  1950. void
  1951. i915_gem_retire_requests(struct drm_device *dev)
  1952. {
  1953. drm_i915_private_t *dev_priv = dev->dev_private;
  1954. struct intel_ring_buffer *ring;
  1955. int i;
  1956. for_each_ring(ring, dev_priv, i)
  1957. i915_gem_retire_requests_ring(ring);
  1958. }
  1959. static void
  1960. i915_gem_retire_work_handler(struct work_struct *work)
  1961. {
  1962. drm_i915_private_t *dev_priv;
  1963. struct drm_device *dev;
  1964. struct intel_ring_buffer *ring;
  1965. bool idle;
  1966. int i;
  1967. dev_priv = container_of(work, drm_i915_private_t,
  1968. mm.retire_work.work);
  1969. dev = dev_priv->dev;
  1970. /* Come back later if the device is busy... */
  1971. if (!mutex_trylock(&dev->struct_mutex)) {
  1972. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
  1973. round_jiffies_up_relative(HZ));
  1974. return;
  1975. }
  1976. i915_gem_retire_requests(dev);
  1977. /* Send a periodic flush down the ring so we don't hold onto GEM
  1978. * objects indefinitely.
  1979. */
  1980. idle = true;
  1981. for_each_ring(ring, dev_priv, i) {
  1982. if (ring->gpu_caches_dirty)
  1983. i915_add_request(ring, NULL);
  1984. idle &= list_empty(&ring->request_list);
  1985. }
  1986. if (!dev_priv->ums.mm_suspended && !idle)
  1987. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
  1988. round_jiffies_up_relative(HZ));
  1989. if (idle)
  1990. intel_mark_idle(dev);
  1991. mutex_unlock(&dev->struct_mutex);
  1992. }
  1993. /**
  1994. * Ensures that an object will eventually get non-busy by flushing any required
1995. * write domains, emitting any outstanding lazy request and retiring any
  1996. * completed requests.
  1997. */
  1998. static int
  1999. i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
  2000. {
  2001. int ret;
  2002. if (obj->active) {
  2003. ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
  2004. if (ret)
  2005. return ret;
  2006. i915_gem_retire_requests_ring(obj->ring);
  2007. }
  2008. return 0;
  2009. }
  2010. /**
  2011. * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
  2012. * @DRM_IOCTL_ARGS: standard ioctl arguments
  2013. *
  2014. * Returns 0 if successful, else an error is returned with the remaining time in
  2015. * the timeout parameter.
  2016. * -ETIME: object is still busy after timeout
  2017. * -ERESTARTSYS: signal interrupted the wait
2018. * -ENOENT: object doesn't exist
  2019. * Also possible, but rare:
  2020. * -EAGAIN: GPU wedged
  2021. * -ENOMEM: damn
  2022. * -ENODEV: Internal IRQ fail
  2023. * -E?: The add request failed
  2024. *
  2025. * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
  2026. * non-zero timeout parameter the wait ioctl will wait for the given number of
2027. * nanoseconds on an object becoming unbusy. Since the wait itself does so
2028. * without holding struct_mutex, the object may become re-busied before this
2029. * function completes. A similar but shorter race condition exists in the
2030. * busy ioctl.
  2031. */
  2032. int
  2033. i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
  2034. {
  2035. drm_i915_private_t *dev_priv = dev->dev_private;
  2036. struct drm_i915_gem_wait *args = data;
  2037. struct drm_i915_gem_object *obj;
  2038. struct intel_ring_buffer *ring = NULL;
  2039. struct timespec timeout_stack, *timeout = NULL;
  2040. unsigned reset_counter;
  2041. u32 seqno = 0;
  2042. int ret = 0;
  2043. if (args->timeout_ns >= 0) {
  2044. timeout_stack = ns_to_timespec(args->timeout_ns);
  2045. timeout = &timeout_stack;
  2046. }
  2047. ret = i915_mutex_lock_interruptible(dev);
  2048. if (ret)
  2049. return ret;
  2050. obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
  2051. if (&obj->base == NULL) {
  2052. mutex_unlock(&dev->struct_mutex);
  2053. return -ENOENT;
  2054. }
  2055. /* Need to make sure the object gets inactive eventually. */
  2056. ret = i915_gem_object_flush_active(obj);
  2057. if (ret)
  2058. goto out;
  2059. if (obj->active) {
  2060. seqno = obj->last_read_seqno;
  2061. ring = obj->ring;
  2062. }
  2063. if (seqno == 0)
  2064. goto out;
  2065. /* Do this after OLR check to make sure we make forward progress polling
  2066. * on this IOCTL with a 0 timeout (like busy ioctl)
  2067. */
  2068. if (!args->timeout_ns) {
  2069. ret = -ETIME;
  2070. goto out;
  2071. }
  2072. drm_gem_object_unreference(&obj->base);
  2073. reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
  2074. mutex_unlock(&dev->struct_mutex);
  2075. ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
  2076. if (timeout)
  2077. args->timeout_ns = timespec_to_ns(timeout);
  2078. return ret;
  2079. out:
  2080. drm_gem_object_unreference(&obj->base);
  2081. mutex_unlock(&dev->struct_mutex);
  2082. return ret;
  2083. }
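/*
 * Illustrative userspace usage (a hedged sketch, not part of this file):
 * with timeout_ns = 0 this polls like the busy ioctl, returning -ETIME if
 * the object is still busy; with a positive timeout it blocks and, on
 * return, timeout_ns holds the remaining time. Assumes libdrm's
 * drmIoctl() and a valid GEM handle "handle".
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000,
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */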
  2084. /**
  2085. * i915_gem_object_sync - sync an object to a ring.
  2086. *
  2087. * @obj: object which may be in use on another ring.
  2088. * @to: ring we wish to use the object on. May be NULL.
  2089. *
  2090. * This code is meant to abstract object synchronization with the GPU.
  2091. * Calling with NULL implies synchronizing the object with the CPU
  2092. * rather than a particular GPU ring.
  2093. *
  2094. * Returns 0 if successful, else propagates up the lower layer error.
  2095. */
  2096. int
  2097. i915_gem_object_sync(struct drm_i915_gem_object *obj,
  2098. struct intel_ring_buffer *to)
  2099. {
  2100. struct intel_ring_buffer *from = obj->ring;
  2101. u32 seqno;
  2102. int ret, idx;
  2103. if (from == NULL || to == from)
  2104. return 0;
  2105. if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
  2106. return i915_gem_object_wait_rendering(obj, false);
  2107. idx = intel_ring_sync_index(from, to);
  2108. seqno = obj->last_read_seqno;
  2109. if (seqno <= from->sync_seqno[idx])
  2110. return 0;
  2111. ret = i915_gem_check_olr(obj->ring, seqno);
  2112. if (ret)
  2113. return ret;
  2114. ret = to->sync_to(to, from, seqno);
  2115. if (!ret)
  2116. /* We use last_read_seqno because sync_to()
  2117. * might have just caused seqno wrap under
  2118. * the radar.
  2119. */
  2120. from->sync_seqno[idx] = obj->last_read_seqno;
  2121. return ret;
  2122. }
  2123. static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
  2124. {
  2125. u32 old_write_domain, old_read_domains;
  2126. /* Force a pagefault for domain tracking on next user access */
  2127. i915_gem_release_mmap(obj);
  2128. if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
  2129. return;
  2130. /* Wait for any direct GTT access to complete */
  2131. mb();
  2132. old_read_domains = obj->base.read_domains;
  2133. old_write_domain = obj->base.write_domain;
  2134. obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
  2135. obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
  2136. trace_i915_gem_object_change_domain(obj,
  2137. old_read_domains,
  2138. old_write_domain);
  2139. }
  2140. /**
  2141. * Unbinds an object from the GTT aperture.
  2142. */
  2143. int
  2144. i915_gem_object_unbind(struct drm_i915_gem_object *obj)
  2145. {
  2146. drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
  2147. struct i915_vma *vma;
  2148. int ret;
  2149. if (!i915_gem_obj_ggtt_bound(obj))
  2150. return 0;
  2151. if (obj->pin_count)
  2152. return -EBUSY;
  2153. BUG_ON(obj->pages == NULL);
  2154. ret = i915_gem_object_finish_gpu(obj);
  2155. if (ret)
  2156. return ret;
  2157. /* Continue on if we fail due to EIO, the GPU is hung so we
  2158. * should be safe and we need to cleanup or else we might
  2159. * cause memory corruption through use-after-free.
  2160. */
  2161. i915_gem_object_finish_gtt(obj);
  2162. /* release the fence reg _after_ flushing */
  2163. ret = i915_gem_object_put_fence(obj);
  2164. if (ret)
  2165. return ret;
  2166. trace_i915_gem_object_unbind(obj);
  2167. if (obj->has_global_gtt_mapping)
  2168. i915_gem_gtt_unbind_object(obj);
  2169. if (obj->has_aliasing_ppgtt_mapping) {
  2170. i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
  2171. obj->has_aliasing_ppgtt_mapping = 0;
  2172. }
  2173. i915_gem_gtt_finish_object(obj);
  2174. i915_gem_object_unpin_pages(obj);
  2175. list_del(&obj->mm_list);
  2176. /* Avoid an unnecessary call to unbind on rebind. */
  2177. obj->map_and_fenceable = true;
  2178. vma = __i915_gem_obj_to_vma(obj);
  2179. list_del(&vma->vma_link);
  2180. drm_mm_remove_node(&vma->node);
  2181. i915_gem_vma_destroy(vma);
  2182. /* Since the unbound list is global, only move to that list if
  2183. * no more VMAs exist.
  2184. * NB: Until we have real VMAs there will only ever be one */
  2185. WARN_ON(!list_empty(&obj->vma_list));
  2186. if (list_empty(&obj->vma_list))
  2187. list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
  2188. return 0;
  2189. }
  2190. int i915_gpu_idle(struct drm_device *dev)
  2191. {
  2192. drm_i915_private_t *dev_priv = dev->dev_private;
  2193. struct intel_ring_buffer *ring;
  2194. int ret, i;
  2195. /* Flush everything onto the inactive list. */
  2196. for_each_ring(ring, dev_priv, i) {
  2197. ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
  2198. if (ret)
  2199. return ret;
  2200. ret = intel_ring_idle(ring);
  2201. if (ret)
  2202. return ret;
  2203. }
  2204. return 0;
  2205. }
  2206. static void i965_write_fence_reg(struct drm_device *dev, int reg,
  2207. struct drm_i915_gem_object *obj)
  2208. {
  2209. drm_i915_private_t *dev_priv = dev->dev_private;
  2210. int fence_reg;
  2211. int fence_pitch_shift;
  2212. uint64_t val;
  2213. if (INTEL_INFO(dev)->gen >= 6) {
  2214. fence_reg = FENCE_REG_SANDYBRIDGE_0;
  2215. fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
  2216. } else {
  2217. fence_reg = FENCE_REG_965_0;
  2218. fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
  2219. }
  2220. if (obj) {
  2221. u32 size = i915_gem_obj_ggtt_size(obj);
  2222. val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
  2223. 0xfffff000) << 32;
  2224. val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
  2225. val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
  2226. if (obj->tiling_mode == I915_TILING_Y)
  2227. val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  2228. val |= I965_FENCE_REG_VALID;
  2229. } else
  2230. val = 0;
  2231. fence_reg += reg * 8;
  2232. I915_WRITE64(fence_reg, val);
  2233. POSTING_READ(fence_reg);
  2234. }
  2235. static void i915_write_fence_reg(struct drm_device *dev, int reg,
  2236. struct drm_i915_gem_object *obj)
  2237. {
  2238. drm_i915_private_t *dev_priv = dev->dev_private;
  2239. u32 val;
  2240. if (obj) {
  2241. u32 size = i915_gem_obj_ggtt_size(obj);
  2242. int pitch_val;
  2243. int tile_width;
  2244. WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
  2245. (size & -size) != size ||
  2246. (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
  2247. "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
  2248. i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
  2249. if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
  2250. tile_width = 128;
  2251. else
  2252. tile_width = 512;
  2253. /* Note: pitch better be a power of two tile widths */
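/* Worked example (illustrative): a 2048-byte stride with 512-byte X tiles
 * gives pitch_val = 4, encoded below as ffs(4) - 1 = 2; with 128-byte Y
 * tiles the same stride gives pitch_val = 16, encoded as 4.
 */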
  2254. pitch_val = obj->stride / tile_width;
  2255. pitch_val = ffs(pitch_val) - 1;
  2256. val = i915_gem_obj_ggtt_offset(obj);
  2257. if (obj->tiling_mode == I915_TILING_Y)
  2258. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2259. val |= I915_FENCE_SIZE_BITS(size);
  2260. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2261. val |= I830_FENCE_REG_VALID;
  2262. } else
  2263. val = 0;
  2264. if (reg < 8)
  2265. reg = FENCE_REG_830_0 + reg * 4;
  2266. else
  2267. reg = FENCE_REG_945_8 + (reg - 8) * 4;
  2268. I915_WRITE(reg, val);
  2269. POSTING_READ(reg);
  2270. }
  2271. static void i830_write_fence_reg(struct drm_device *dev, int reg,
  2272. struct drm_i915_gem_object *obj)
  2273. {
  2274. drm_i915_private_t *dev_priv = dev->dev_private;
  2275. uint32_t val;
  2276. if (obj) {
  2277. u32 size = i915_gem_obj_ggtt_size(obj);
  2278. uint32_t pitch_val;
  2279. WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
  2280. (size & -size) != size ||
  2281. (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
  2282. "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
  2283. i915_gem_obj_ggtt_offset(obj), size);
  2284. pitch_val = obj->stride / 128;
  2285. pitch_val = ffs(pitch_val) - 1;
  2286. val = i915_gem_obj_ggtt_offset(obj);
  2287. if (obj->tiling_mode == I915_TILING_Y)
  2288. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2289. val |= I830_FENCE_SIZE_BITS(size);
  2290. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2291. val |= I830_FENCE_REG_VALID;
  2292. } else
  2293. val = 0;
  2294. I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
  2295. POSTING_READ(FENCE_REG_830_0 + reg * 4);
  2296. }
2297. static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
  2298. {
  2299. return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
  2300. }
  2301. static void i915_gem_write_fence(struct drm_device *dev, int reg,
  2302. struct drm_i915_gem_object *obj)
  2303. {
  2304. struct drm_i915_private *dev_priv = dev->dev_private;
  2305. /* Ensure that all CPU reads are completed before installing a fence
  2306. * and all writes before removing the fence.
  2307. */
  2308. if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
  2309. mb();
  2310. switch (INTEL_INFO(dev)->gen) {
  2311. case 7:
  2312. case 6:
  2313. case 5:
  2314. case 4: i965_write_fence_reg(dev, reg, obj); break;
  2315. case 3: i915_write_fence_reg(dev, reg, obj); break;
  2316. case 2: i830_write_fence_reg(dev, reg, obj); break;
  2317. default: BUG();
  2318. }
  2319. /* And similarly be paranoid that no direct access to this region
  2320. * is reordered to before the fence is installed.
  2321. */
  2322. if (i915_gem_object_needs_mb(obj))
  2323. mb();
  2324. }
  2325. static inline int fence_number(struct drm_i915_private *dev_priv,
  2326. struct drm_i915_fence_reg *fence)
  2327. {
  2328. return fence - dev_priv->fence_regs;
  2329. }
  2330. struct write_fence {
  2331. struct drm_device *dev;
  2332. struct drm_i915_gem_object *obj;
  2333. int fence;
  2334. };
  2335. static void i915_gem_write_fence__ipi(void *data)
  2336. {
  2337. struct write_fence *args = data;
  2338. /* Required for SNB+ with LLC */
  2339. wbinvd();
  2340. /* Required for VLV */
  2341. i915_gem_write_fence(args->dev, args->fence, args->obj);
  2342. }
  2343. static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
  2344. struct drm_i915_fence_reg *fence,
  2345. bool enable)
  2346. {
  2347. struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  2348. struct write_fence args = {
  2349. .dev = obj->base.dev,
  2350. .fence = fence_number(dev_priv, fence),
  2351. .obj = enable ? obj : NULL,
  2352. };
  2353. /* In order to fully serialize access to the fenced region and
  2354. * the update to the fence register we need to take extreme
  2355. * measures on SNB+. In theory, the write to the fence register
  2356. * flushes all memory transactions before, and coupled with the
  2357. * mb() placed around the register write we serialise all memory
  2358. * operations with respect to the changes in the tiler. Yet, on
  2359. * SNB+ we need to take a step further and emit an explicit wbinvd()
  2360. * on each processor in order to manually flush all memory
  2361. * transactions before updating the fence register.
  2362. *
2363. * However, Valleyview complicates matters. There the wbinvd is
  2364. * insufficient and unlike SNB/IVB requires the serialising
  2365. * register write. (Note that that register write by itself is
  2366. * conversely not sufficient for SNB+.) To compromise, we do both.
  2367. */
  2368. if (INTEL_INFO(args.dev)->gen >= 6)
  2369. on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
  2370. else
  2371. i915_gem_write_fence(args.dev, args.fence, args.obj);
  2372. if (enable) {
  2373. obj->fence_reg = args.fence;
  2374. fence->obj = obj;
  2375. list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
  2376. } else {
  2377. obj->fence_reg = I915_FENCE_REG_NONE;
  2378. fence->obj = NULL;
  2379. list_del_init(&fence->lru_list);
  2380. }
  2381. }
  2382. static int
  2383. i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
  2384. {
  2385. if (obj->last_fenced_seqno) {
  2386. int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
  2387. if (ret)
  2388. return ret;
  2389. obj->last_fenced_seqno = 0;
  2390. }
  2391. obj->fenced_gpu_access = false;
  2392. return 0;
  2393. }
  2394. int
  2395. i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
  2396. {
  2397. struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  2398. struct drm_i915_fence_reg *fence;
  2399. int ret;
  2400. ret = i915_gem_object_wait_fence(obj);
  2401. if (ret)
  2402. return ret;
  2403. if (obj->fence_reg == I915_FENCE_REG_NONE)
  2404. return 0;
  2405. fence = &dev_priv->fence_regs[obj->fence_reg];
  2406. i915_gem_object_fence_lost(obj);
  2407. i915_gem_object_update_fence(obj, fence, false);
  2408. return 0;
  2409. }
  2410. static struct drm_i915_fence_reg *
  2411. i915_find_fence_reg(struct drm_device *dev)
  2412. {
  2413. struct drm_i915_private *dev_priv = dev->dev_private;
  2414. struct drm_i915_fence_reg *reg, *avail;
  2415. int i;
  2416. /* First try to find a free reg */
  2417. avail = NULL;
  2418. for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
  2419. reg = &dev_priv->fence_regs[i];
  2420. if (!reg->obj)
  2421. return reg;
  2422. if (!reg->pin_count)
  2423. avail = reg;
  2424. }
  2425. if (avail == NULL)
  2426. return NULL;
  2427. /* None available, try to steal one or wait for a user to finish */
  2428. list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
  2429. if (reg->pin_count)
  2430. continue;
  2431. return reg;
  2432. }
  2433. return NULL;
  2434. }
  2435. /**
  2436. * i915_gem_object_get_fence - set up fencing for an object
  2437. * @obj: object to map through a fence reg
  2438. *
  2439. * When mapping objects through the GTT, userspace wants to be able to write
  2440. * to them without having to worry about swizzling if the object is tiled.
  2441. * This function walks the fence regs looking for a free one for @obj,
  2442. * stealing one if it can't find any.
  2443. *
  2444. * It then sets up the reg based on the object's properties: address, pitch
  2445. * and tiling format.
  2446. *
  2447. * For an untiled surface, this removes any existing fence.
  2448. */
  2449. int
  2450. i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
  2451. {
  2452. struct drm_device *dev = obj->base.dev;
  2453. struct drm_i915_private *dev_priv = dev->dev_private;
  2454. bool enable = obj->tiling_mode != I915_TILING_NONE;
  2455. struct drm_i915_fence_reg *reg;
  2456. int ret;
  2457. /* Have we updated the tiling parameters upon the object and so
  2458. * will need to serialise the write to the associated fence register?
  2459. */
  2460. if (obj->fence_dirty) {
  2461. ret = i915_gem_object_wait_fence(obj);
  2462. if (ret)
  2463. return ret;
  2464. }
  2465. /* Just update our place in the LRU if our fence is getting reused. */
  2466. if (obj->fence_reg != I915_FENCE_REG_NONE) {
  2467. reg = &dev_priv->fence_regs[obj->fence_reg];
  2468. if (!obj->fence_dirty) {
  2469. list_move_tail(&reg->lru_list,
  2470. &dev_priv->mm.fence_list);
  2471. return 0;
  2472. }
  2473. } else if (enable) {
  2474. reg = i915_find_fence_reg(dev);
  2475. if (reg == NULL)
  2476. return -EDEADLK;
  2477. if (reg->obj) {
  2478. struct drm_i915_gem_object *old = reg->obj;
  2479. ret = i915_gem_object_wait_fence(old);
  2480. if (ret)
  2481. return ret;
  2482. i915_gem_object_fence_lost(old);
  2483. }
  2484. } else
  2485. return 0;
  2486. i915_gem_object_update_fence(obj, reg, enable);
  2487. obj->fence_dirty = false;
  2488. return 0;
  2489. }
  2490. static bool i915_gem_valid_gtt_space(struct drm_device *dev,
  2491. struct drm_mm_node *gtt_space,
  2492. unsigned long cache_level)
  2493. {
  2494. struct drm_mm_node *other;
  2495. /* On non-LLC machines we have to be careful when putting differing
  2496. * types of snoopable memory together to avoid the prefetcher
  2497. * crossing memory domains and dying.
  2498. */
  2499. if (HAS_LLC(dev))
  2500. return true;
  2501. if (!drm_mm_node_allocated(gtt_space))
  2502. return true;
  2503. if (list_empty(&gtt_space->node_list))
  2504. return true;
  2505. other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
  2506. if (other->allocated && !other->hole_follows && other->color != cache_level)
  2507. return false;
  2508. other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
  2509. if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
  2510. return false;
  2511. return true;
  2512. }
  2513. static void i915_gem_verify_gtt(struct drm_device *dev)
  2514. {
  2515. #if WATCH_GTT
  2516. struct drm_i915_private *dev_priv = dev->dev_private;
  2517. struct drm_i915_gem_object *obj;
  2518. int err = 0;
  2519. list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
  2520. if (obj->gtt_space == NULL) {
  2521. printk(KERN_ERR "object found on GTT list with no space reserved\n");
  2522. err++;
  2523. continue;
  2524. }
  2525. if (obj->cache_level != obj->gtt_space->color) {
  2526. printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
  2527. i915_gem_obj_ggtt_offset(obj),
  2528. i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
  2529. obj->cache_level,
  2530. obj->gtt_space->color);
  2531. err++;
  2532. continue;
  2533. }
  2534. if (!i915_gem_valid_gtt_space(dev,
  2535. obj->gtt_space,
  2536. obj->cache_level)) {
  2537. printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
  2538. i915_gem_obj_ggtt_offset(obj),
  2539. i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
  2540. obj->cache_level);
  2541. err++;
  2542. continue;
  2543. }
  2544. }
  2545. WARN_ON(err);
  2546. #endif
  2547. }
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
			    unsigned alignment,
			    bool map_and_fenceable,
			    bool nonblocking)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
	bool mappable, fenceable;
	size_t gtt_max = map_and_fenceable ?
		dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
	struct i915_vma *vma;
	int ret;

	if (WARN_ON(!list_empty(&obj->vma_list)))
		return -EBUSY;

	fence_size = i915_gem_get_gtt_size(dev,
					   obj->base.size,
					   obj->tiling_mode);
	fence_alignment = i915_gem_get_gtt_alignment(dev,
						     obj->base.size,
						     obj->tiling_mode, true);
	unfenced_alignment =
		i915_gem_get_gtt_alignment(dev,
					   obj->base.size,
					   obj->tiling_mode, false);

	if (alignment == 0)
		alignment = map_and_fenceable ? fence_alignment :
						unfenced_alignment;
	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	size = map_and_fenceable ? fence_size : obj->base.size;

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->base.size > gtt_max) {
		DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
			  obj->base.size,
			  map_and_fenceable ? "mappable" : "total",
			  gtt_max);
		return -E2BIG;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unpin;
	}

search_free:
	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
						  &vma->node,
						  size, alignment,
						  obj->cache_level, 0, gtt_max);
	if (ret) {
		ret = i915_gem_evict_something(dev, size, alignment,
					       obj->cache_level,
					       map_and_fenceable,
					       nonblocking);
		if (ret == 0)
			goto search_free;

		goto err_free_vma;
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
					      obj->cache_level))) {
		ret = -EINVAL;
		goto err_remove_node;
	}

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		goto err_remove_node;

	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&obj->mm_list, &vm->inactive_list);
	list_add(&vma->vma_link, &obj->vma_list);

	fenceable =
		i915_gem_obj_ggtt_size(obj) == fence_size &&
		(i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;

	mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
		dev_priv->gtt.mappable_end;

	obj->map_and_fenceable = mappable && fenceable;

	trace_i915_gem_object_bind(obj, map_and_fenceable);
	i915_gem_verify_gtt(dev);
	return 0;

err_remove_node:
	drm_mm_remove_node(&vma->node);
err_free_vma:
	i915_gem_vma_destroy(vma);
err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
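
/* Flush the CPU cache for the object's backing pages.  The flush is skipped
 * when there are no pages, when the pages come from stolen memory, or when
 * the object uses a snooped (non-NONE) cache level, since those cases are
 * already coherent or will be flushed again at bind time.
 */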
void
i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen)
		return;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (obj->cache_level != I915_CACHE_NONE)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_sg(obj->pages);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	i915_gem_chipset_flush(obj->base.dev);
	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (!i915_gem_obj_ggtt_bound(obj))
		return -EINVAL;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list,
			       &dev_priv->gtt.base.inactive_list);

	return 0;
}
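
/* Change the cache level (and hence the PTE caching bits) of an object.
 * Pinned objects cannot be switched.  A bound object may first be unbound if
 * its current placement is not valid for the new level, and is otherwise
 * rebound through the global GTT and/or aliasing PPGTT with the new level.
 */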
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
	int ret;

	if (obj->cache_level == cache_level)
		return 0;

	if (obj->pin_count) {
		DRM_DEBUG("can not change the cache level of pinned objects\n");
		return -EBUSY;
	}

	if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			return ret;
	}

	if (i915_gem_obj_ggtt_bound(obj)) {
		ret = i915_gem_object_finish_gpu(obj);
		if (ret)
			return ret;

		i915_gem_object_finish_gtt(obj);

		/* Before SandyBridge, you could not use tiling or fence
		 * registers with snooped memory, so relinquish any fences
		 * currently pointing to our region in the aperture.
		 */
		if (INTEL_INFO(dev)->gen < 6) {
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		}

		if (obj->has_global_gtt_mapping)
			i915_gem_gtt_bind_object(obj, cache_level);
		if (obj->has_aliasing_ppgtt_mapping)
			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
					       obj, cache_level);

		i915_gem_obj_ggtt_set_color(obj, cache_level);
	}

	if (cache_level == I915_CACHE_NONE) {
		u32 old_read_domains, old_write_domain;

		/* If we're coming from LLC cached, then we haven't
		 * actually been tracking whether the data is in the
		 * CPU cache or not, since we only allow one bit set
		 * in obj->write_domain and have been skipping the clflushes.
		 * Just set it to the CPU cache for now.
		 */
		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
		WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);

		old_read_domains = obj->base.read_domains;
		old_write_domain = obj->base.write_domain;

		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;

		trace_i915_gem_object_change_domain(obj,
						    old_read_domains,
						    old_write_domain);
	}

	obj->cache_level = cache_level;
	i915_gem_verify_gtt(dev);
	return 0;
}
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	args->caching = obj->cache_level != I915_CACHE_NONE;

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		level = I915_CACHE_LLC;
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined)
{
	u32 old_read_domains, old_write_domain;
	int ret;

	if (pipelined != obj->ring) {
		ret = i915_gem_object_sync(obj, pipelined);
		if (ret)
			return ret;
	}

	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
	if (ret)
		return ret;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_object_pin(obj, alignment, true, false);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
	int ret;

	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	/* Ensure that we invalidate the GPU's caches and TLBs. */
	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	return 0;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	unsigned reset_counter;
	u32 seqno = 0;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
	if (ret)
		return ret;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}
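
/* Pin an object into the global GTT, binding it first if necessary.  If the
 * object is already bound but does not satisfy the requested alignment or
 * map_and_fenceable constraint, it is unbound and rebound before the pin
 * count is raised.
 */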
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    uint32_t alignment,
		    bool map_and_fenceable,
		    bool nonblocking)
{
	int ret;

	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
		return -EBUSY;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
		    (map_and_fenceable && !obj->map_and_fenceable)) {
			WARN(obj->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     i915_gem_obj_ggtt_offset(obj), alignment,
			     map_and_fenceable,
			     obj->map_and_fenceable);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (!i915_gem_obj_ggtt_bound(obj)) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

		ret = i915_gem_object_bind_to_gtt(obj, alignment,
						  map_and_fenceable,
						  nonblocking);
		if (ret)
			return ret;

		if (!dev_priv->mm.aliasing_ppgtt)
			i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	if (!obj->has_global_gtt_mapping && map_and_fenceable)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	obj->pin_count++;
	obj->pin_mappable |= map_and_fenceable;

	return 0;
}
void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pin_count == 0);
	BUG_ON(!i915_gem_obj_ggtt_bound(obj));

	if (--obj->pin_count == 0)
		obj->pin_mappable = false;
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (obj->pin_filp != NULL && obj->pin_filp != file) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	if (obj->user_pin_count == 0) {
		ret = i915_gem_object_pin(obj, args->alignment, true, false);
		if (ret)
			goto out;
	}

	obj->user_pin_count++;
	obj->pin_filp = file;

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = i915_gem_obj_ggtt_offset(obj);
out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_filp != file) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj->user_pin_count--;
	if (obj->user_pin_count == 0) {
		obj->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);

	args->busy = obj->active;
	if (obj->ring) {
		BUILD_BUG_ON(I915_NUM_RINGS > 16);
		args->busy |= intel_ring_flag(obj->ring) << 16;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_count) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
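
/* Common initialisation for a freshly allocated GEM object: empty list heads,
 * the vtable of backing-storage ops, default fence/madvise state, and the
 * per-device object accounting.
 */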
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	INIT_LIST_HEAD(&obj->mm_list);
	INIT_LIST_HEAD(&obj->global_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->exec_list);
	INIT_LIST_HEAD(&obj->vma_list);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;

	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		i915_gem_object_free(obj);
		return NULL;
	}

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = file_inode(obj->base.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;
}
int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}
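
/* Final destruction of a GEM object: detach any physical-object backing, drop
 * pins, unbind from the GTT (retrying uninterruptibly if the first attempt is
 * interrupted by a signal), release the backing pages, mmap offset and any
 * stolen region, then free the object itself.
 */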
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_gem_object_destroy(obj);

	if (obj->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	obj->pin_count = 0;
	if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		WARN_ON(i915_gem_object_unbind(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);
	i915_gem_object_release_stolen(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);
}
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->vma_link);
	vma->vm = vm;
	vma->obj = obj;

	return vma;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
	WARN_ON(vma->node.allocated);
	kfree(vma);
}
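
/* Quiesce the GPU for suspend or teardown: wait for outstanding rendering,
 * retire requests, evict everything under UMS, reset the fences, stop the
 * hangcheck timer and clean up the ring buffers.
 */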
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	i915_gem_retire_requests(dev);

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_gem_evict_everything(dev);

	i915_gem_reset_fences(dev);

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}
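
/* Re-apply the saved L3 remapping information (dev_priv->l3_parity.remap_info)
 * to the GEN7_L3LOG registers, with DOP clock gating temporarily disabled
 * while the registers are rewritten.
 */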
void i915_gem_l3_remap(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 misccpctl;
	int i;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	if (!dev_priv->l3_parity.remap_info)
		return;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
			DRM_DEBUG("0x%x was already programmed to %x\n",
				  GEN7_L3LOG_BASE + i, remap);
		if (remap && !dev_priv->l3_parity.remap_info[i/4])
			DRM_DEBUG_DRIVER("Clearing remapped register\n");
		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
	}

	/* Make sure all the writes land before disabling dop clock gating */
	POSTING_READ(GEN7_L3LOG_BASE);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
void i915_gem_init_swizzling(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else
		BUG();
}
static bool
intel_enable_blt(struct drm_device *dev)
{
	if (!HAS_BLT(dev))
		return false;

	/* The blitter was dysfunctional on early prototypes */
	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
		DRM_INFO("BLT not supported on this pre-production hardware;"
			 " graphics performance will be degraded.\n");
		return false;
	}

	return true;
}
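
/* Initialise the render ring plus whichever of the BSD, BLT and VEBOX rings
 * the hardware provides, unwinding in reverse order on failure, and seed the
 * initial seqno near the top of the u32 range (presumably so that seqno
 * wraparound handling is exercised early).
 */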
static int i915_gem_init_rings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (intel_enable_blt(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_vebox_ring;

	return 0;

cleanup_vebox_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

	return ret;
}
int
i915_gem_init_hw(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;

	if (dev_priv->ellc_size)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (HAS_PCH_NOP(dev)) {
		u32 temp = I915_READ(GEN7_MSG_CTL);
		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
		I915_WRITE(GEN7_MSG_CTL, temp);
	}

	i915_gem_l3_remap(dev);

	i915_gem_init_swizzling(dev);

	ret = i915_gem_init_rings(dev);
	if (ret)
		return ret;

	/*
	 * XXX: There was some w/a described somewhere suggesting loading
	 * contexts before PPGTT.
	 */
	i915_gem_context_init(dev);
	if (dev_priv->mm.aliasing_ppgtt) {
		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
		if (ret) {
			i915_gem_cleanup_aliasing_ppgtt(dev);
			DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
		}
	}

	return 0;
}
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (IS_VALLEYVIEW(dev)) {
		/* VLVA0 (potential hack), BIOS isn't actually waking us */
		I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
	}

	i915_gem_init_global_gtt(dev);

	ret = i915_gem_init_hw(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		i915_gem_cleanup_aliasing_ppgtt(dev);
		return ret;
	}

	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->dri1.allow_batchbuffer = 1;
	return 0;
}
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		intel_cleanup_ring_buffer(ring);
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->ums.mm_suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->ums.mm_suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_idle(dev);

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound ums.mm_suspended!
	 */
	if (ret != 0)
		dev_priv->ums.mm_suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	mutex_unlock(&dev->struct_mutex);
}
static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
}
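
/* One-time GEM setup at driver load: create the object slab cache, initialise
 * the various object, request and fence lists plus the retire work handler,
 * pick the number of fence registers for the platform, and register the
 * shrinker.
 */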
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	dev_priv->slab =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->gtt.base.active_list);
	INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	i915_gem_reset_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}
/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	i915_gem_chipset_flush(dev);

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}
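
/* Attach an object to a physically contiguous phys object (allocating it on
 * first use) and copy the current shmem-backed contents into it, so that
 * hardware which requires physical addresses (see the comment above
 * i915_gem_init_phys_object) can use the buffer.
 */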
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
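
/* Best-effort check whether @task currently owns @mutex.  The owner field is
 * only tracked with CONFIG_SMP or CONFIG_DEBUG_MUTEXES; otherwise we
 * conservatively report false.
 */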
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
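
/* Shrinker callback: when asked to scan, purge and shrink objects to free
 * memory, then report the number of reclaimable pages on the unbound and
 * inactive lists.  struct_mutex is taken via trylock, with "lock stealing"
 * only when the lock is already held by the current task.
 */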
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	int nr_to_scan = sc->nr_to_scan;
	bool unlock = true;
	int cnt;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return 0;

		unlock = false;
	}

	if (nr_to_scan) {
		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
		if (nr_to_scan > 0)
			nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
							false);
		if (nr_to_scan > 0)
			i915_gem_shrink_all(dev_priv);
	}

	cnt = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			cnt += obj->base.size >> PAGE_SHIFT;
	list_for_each_entry(obj, &vm->inactive_list, global_list)
		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
			cnt += obj->base.size >> PAGE_SHIFT;

	if (unlock)
		mutex_unlock(&dev->struct_mutex);
	return cnt;
}