i915_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

struct change_domains {
	uint32_t invalidate_domains;
	uint32_t flush_domains;
	uint32_t flush_rings;
};

static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj,
						  struct intel_ring_buffer *pipelined);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
					     bool write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
					   bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
				       unsigned alignment,
				       bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    int nr_to_scan,
				    gfp_t gfp_mask);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

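/* Account for an object entering or leaving the GTT, including how much of
 * the CPU-mappable aperture (the region below gtt_mappable_end) it occupies.
 */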
static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
				  struct drm_i915_gem_object *obj)
{
	dev_priv->mm.gtt_count++;
	dev_priv->mm.gtt_memory += obj->gtt_space->size;
	if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
		dev_priv->mm.mappable_gtt_used +=
			min_t(size_t, obj->gtt_space->size,
			      dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
	}
	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
}

static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
				     struct drm_i915_gem_object *obj)
{
	dev_priv->mm.gtt_count--;
	dev_priv->mm.gtt_memory -= obj->gtt_space->size;
	if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
		dev_priv->mm.mappable_gtt_used -=
			min_t(size_t, obj->gtt_space->size,
			      dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
	}
	list_del_init(&obj->gtt_list);
}

/**
 * Update the mappable working set counters. Call _only_ when there is a change
 * in one of (pin|fault)_mappable and update *_mappable _before_ calling.
 * @mappable: new state of the changed mappable flag (either pin_ or fault_).
 */
static void
i915_gem_info_update_mappable(struct drm_i915_private *dev_priv,
			      struct drm_i915_gem_object *obj,
			      bool mappable)
{
	if (mappable) {
		if (obj->pin_mappable && obj->fault_mappable)
			/* Combined state was already mappable. */
			return;
		dev_priv->mm.gtt_mappable_count++;
		dev_priv->mm.gtt_mappable_memory += obj->gtt_space->size;
	} else {
		if (obj->pin_mappable || obj->fault_mappable)
			/* Combined state still mappable. */
			return;
		dev_priv->mm.gtt_mappable_count--;
		dev_priv->mm.gtt_mappable_memory -= obj->gtt_space->size;
	}
}

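/* Pinned objects are tracked separately so that i915_gem_get_aperture_ioctl()
 * can report how much aperture space is still available for new buffers.
 */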
static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
				  struct drm_i915_gem_object *obj,
				  bool mappable)
{
	dev_priv->mm.pin_count++;
	dev_priv->mm.pin_memory += obj->gtt_space->size;
	if (mappable) {
		obj->pin_mappable = true;
		i915_gem_info_update_mappable(dev_priv, obj, true);
	}
}

static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
				     struct drm_i915_gem_object *obj)
{
	dev_priv->mm.pin_count--;
	dev_priv->mm.pin_memory -= obj->gtt_space->size;
	if (obj->pin_mappable) {
		obj->pin_mappable = false;
		i915_gem_info_update_mappable(dev_priv, obj, false);
	}
}

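/* Check whether the GPU is wedged. If a reset is in progress, wait for it to
 * complete; return -EIO if the reset failed and the GPU is still hung.
 */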
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	/* Success, we reset the GPU! */
	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/* GPU is hung, bump the completion count to account for
	 * the token we just consumed so that we never hit zero and
	 * end up waiting upon a subsequent completion event that
	 * will never happen.
	 */
	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->wait.lock, flags);

	return -EIO;
}

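/* Take struct_mutex interruptibly, bailing out early (-EIO or -EAGAIN) if the
 * GPU is wedged so callers do not queue new work against a hung device.
 */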
static int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		mutex_unlock(&dev->struct_mutex);
		return -EAGAIN;
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

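/* An object is "inactive" when it is bound into the GTT but neither in use by
 * the GPU nor pinned; such objects are kept on the inactive LRU list.
 */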
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

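/* Initialise the GTT address-space manager: [start, end) is handed to the
 * drm_mm allocator, and mappable_end marks the end of the CPU-visible portion.
 */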
void i915_gem_do_init(struct drm_device *dev,
		      unsigned long start,
		      unsigned long mappable_end,
		      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

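/* Report the total GTT aperture size and how much of it is not currently
 * consumed by pinned objects.
 */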
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	args->handle = handle;
	return 0;
}

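/* Objects tiled with bit-17 swizzling cannot be copied by the simple linear
 * CPU paths; they need the swizzling-aware slow copy below.
 */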
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

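/* Copy length bytes between two pages through temporary kernel mappings
 * (kmap); the slow pread/pwrite paths use this once the user pages have
 * been pinned, so no faults can occur during the copy.
 */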
static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

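/* As slow_shmem_copy, but XORs bit 6 of the GPU-side offset with bit 17 of
 * the page's physical address to apply/undo bit-17 swizzling during the copy.
 */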
static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page);
		ret = __copy_to_user_inatomic(user_data,
					      vaddr + page_offset,
					      page_length);
		kunmap_atomic(vaddr);

		mark_page_accessed(page);
		page_cache_release(page);
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pread path, which pins the user pages with
 * get_user_pages so we can copy out of the object's backing pages while
 * holding the struct mutex and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					page,
					shmem_page_offset,
					page_length);
		}

		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		mark_page_accessed(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

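	/* Prefer the fast path, which copies with the mutex held and must not
	 * fault; fall back to the pinned slow path on -EFAULT or when the
	 * object needs bit-17 swizzling.
	 */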
	ret = -EFAULT;
	if (!i915_gem_object_needs_bit17_swizzle(obj))
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */
static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it with kmap for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_pages;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page, KM_USER0);
		ret = __copy_from_user_inatomic(vaddr + page_offset,
						user_data,
						page_length);
		kunmap_atomic(vaddr, KM_USER0);

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it with kmap for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(page,
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
	else if (obj->tiling_mode == I915_TILING_NONE &&
		 obj->gtt_space &&
		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_object_pin(obj, 0, true);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret)
			goto out_unpin;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
		i915_gem_object_unpin(obj);
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			goto out;

		ret = -EFAULT;
		if (!i915_gem_object_needs_bit17_swizzle(obj))
			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	intel_mark_busy(dev, obj);

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg =
				&dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	/* Maintain LRU order of "inactive" objects */
	if (ret == 0 && i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

  1013. /**
  1014. * Maps the contents of an object, returning the address it is mapped
  1015. * into.
  1016. *
  1017. * While the mapping holds a reference on the contents of the object, it doesn't
  1018. * imply a ref on the object itself.
  1019. */
  1020. int
  1021. i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
  1022. struct drm_file *file)
  1023. {
  1024. struct drm_i915_private *dev_priv = dev->dev_private;
  1025. struct drm_i915_gem_mmap *args = data;
  1026. struct drm_gem_object *obj;
  1027. loff_t offset;
  1028. unsigned long addr;
  1029. if (!(dev->driver->driver_features & DRIVER_GEM))
  1030. return -ENODEV;
  1031. obj = drm_gem_object_lookup(dev, file, args->handle);
  1032. if (obj == NULL)
  1033. return -ENOENT;
  1034. if (obj->size > dev_priv->mm.gtt_mappable_end) {
  1035. drm_gem_object_unreference_unlocked(obj);
  1036. return -E2BIG;
  1037. }
  1038. offset = args->offset;
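/* This maps the object's backing shmem file directly, giving a normal
 * CPU mapping; GTT mappings instead go through the mmap_gtt ioctl and
 * the fault handler below.
 */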
  1039. down_write(&current->mm->mmap_sem);
  1040. addr = do_mmap(obj->filp, 0, args->size,
  1041. PROT_READ | PROT_WRITE, MAP_SHARED,
  1042. args->offset);
  1043. up_write(&current->mm->mmap_sem);
  1044. drm_gem_object_unreference_unlocked(obj);
  1045. if (IS_ERR((void *)addr))
  1046. return addr;
  1047. args->addr_ptr = (uint64_t) addr;
  1048. return 0;
  1049. }
  1050. /**
  1051. * i915_gem_fault - fault a page into the GTT
* @vma: VMA in question
* @vmf: fault info
  1054. *
* The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
  1056. * from userspace. The fault handler takes care of binding the object to
  1057. * the GTT (if needed), allocating and programming a fence register (again,
  1058. * only if needed based on whether the old reg is still valid or the object
  1059. * is tiled) and inserting a new PTE into the faulting process.
  1060. *
  1061. * Note that the faulting process may involve evicting existing objects
  1062. * from the GTT and/or fence registers to make room. So performance may
  1063. * suffer if the GTT working set is large or there are few fence registers
  1064. * left.
  1065. */
  1066. int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1067. {
  1068. struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
  1069. struct drm_device *dev = obj->base.dev;
  1070. drm_i915_private_t *dev_priv = dev->dev_private;
  1071. pgoff_t page_offset;
  1072. unsigned long pfn;
  1073. int ret = 0;
  1074. bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
  1075. /* We don't use vmf->pgoff since that has the fake offset */
  1076. page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
  1077. PAGE_SHIFT;
  1078. /* Now bind it into the GTT if needed */
  1079. mutex_lock(&dev->struct_mutex);
  1080. BUG_ON(obj->pin_count && !obj->pin_mappable);
  1081. if (!obj->map_and_fenceable) {
  1082. ret = i915_gem_object_unbind(obj);
  1083. if (ret)
  1084. goto unlock;
  1085. }
  1086. if (!obj->gtt_space) {
  1087. ret = i915_gem_object_bind_to_gtt(obj, 0, true);
  1088. if (ret)
  1089. goto unlock;
  1090. }
  1091. ret = i915_gem_object_set_to_gtt_domain(obj, write);
  1092. if (ret)
  1093. goto unlock;
  1094. if (!obj->fault_mappable) {
  1095. obj->fault_mappable = true;
  1096. i915_gem_info_update_mappable(dev_priv, obj, true);
  1097. }
  1098. /* Need a new fence register? */
  1099. if (obj->tiling_mode != I915_TILING_NONE) {
  1100. ret = i915_gem_object_get_fence_reg(obj, true);
  1101. if (ret)
  1102. goto unlock;
  1103. }
  1104. if (i915_gem_object_is_inactive(obj))
  1105. list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
  1106. pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
  1107. page_offset;
  1108. /* Finally, remap it using the new GTT offset */
  1109. ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
  1110. unlock:
  1111. mutex_unlock(&dev->struct_mutex);
  1112. switch (ret) {
  1113. case -EAGAIN:
  1114. set_need_resched();
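/* fall through - returning NOPAGE simply causes the fault to be retried */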
  1115. case 0:
  1116. case -ERESTARTSYS:
  1117. return VM_FAULT_NOPAGE;
  1118. case -ENOMEM:
  1119. return VM_FAULT_OOM;
  1120. default:
  1121. return VM_FAULT_SIGBUS;
  1122. }
  1123. }
  1124. /**
  1125. * i915_gem_create_mmap_offset - create a fake mmap offset for an object
  1126. * @obj: obj in question
  1127. *
  1128. * GEM memory mapping works by handing back to userspace a fake mmap offset
  1129. * it can use in a subsequent mmap(2) call. The DRM core code then looks
  1130. * up the object based on the offset and sets up the various memory mapping
  1131. * structures.
  1132. *
  1133. * This routine allocates and attaches a fake offset for @obj.
  1134. */
  1135. static int
  1136. i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
  1137. {
  1138. struct drm_device *dev = obj->base.dev;
  1139. struct drm_gem_mm *mm = dev->mm_private;
  1140. struct drm_map_list *list;
  1141. struct drm_local_map *map;
  1142. int ret = 0;
  1143. /* Set the object up for mmap'ing */
  1144. list = &obj->base.map_list;
  1145. list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
  1146. if (!list->map)
  1147. return -ENOMEM;
  1148. map = list->map;
  1149. map->type = _DRM_GEM;
  1150. map->size = obj->base.size;
  1151. map->handle = obj;
  1152. /* Get a DRM GEM mmap offset allocated... */
  1153. list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
  1154. obj->base.size / PAGE_SIZE,
  1155. 0, 0);
  1156. if (!list->file_offset_node) {
  1157. DRM_ERROR("failed to allocate offset for bo %d\n",
  1158. obj->base.name);
  1159. ret = -ENOSPC;
  1160. goto out_free_list;
  1161. }
  1162. list->file_offset_node = drm_mm_get_block(list->file_offset_node,
  1163. obj->base.size / PAGE_SIZE,
  1164. 0);
  1165. if (!list->file_offset_node) {
  1166. ret = -ENOMEM;
  1167. goto out_free_list;
  1168. }
  1169. list->hash.key = list->file_offset_node->start;
  1170. ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
  1171. if (ret) {
  1172. DRM_ERROR("failed to add to map hash\n");
  1173. goto out_free_mm;
  1174. }
  1175. return 0;
  1176. out_free_mm:
  1177. drm_mm_put_block(list->file_offset_node);
  1178. out_free_list:
  1179. kfree(list->map);
  1180. list->map = NULL;
  1181. return ret;
  1182. }
  1183. /**
  1184. * i915_gem_release_mmap - remove physical page mappings
  1185. * @obj: obj in question
  1186. *
  1187. * Preserve the reservation of the mmapping with the DRM core code, but
  1188. * relinquish ownership of the pages back to the system.
  1189. *
  1190. * It is vital that we remove the page mapping if we have mapped a tiled
  1191. * object through the GTT and then lose the fence register due to
  1192. * resource pressure. Similarly if the object has been moved out of the
* aperture, then pages mapped into userspace must be revoked. Removing the
  1194. * mapping will then trigger a page fault on the next user access, allowing
  1195. * fixup by i915_gem_fault().
  1196. */
  1197. void
  1198. i915_gem_release_mmap(struct drm_i915_gem_object *obj)
  1199. {
  1200. struct drm_device *dev = obj->base.dev;
  1201. struct drm_i915_private *dev_priv = dev->dev_private;
  1202. if (unlikely(obj->base.map_list.map && dev->dev_mapping))
  1203. unmap_mapping_range(dev->dev_mapping,
  1204. (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
  1205. obj->base.size, 1);
  1206. if (obj->fault_mappable) {
  1207. obj->fault_mappable = false;
  1208. i915_gem_info_update_mappable(dev_priv, obj, false);
  1209. }
  1210. }
  1211. static void
  1212. i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
  1213. {
  1214. struct drm_device *dev = obj->base.dev;
  1215. struct drm_gem_mm *mm = dev->mm_private;
  1216. struct drm_map_list *list = &obj->base.map_list;
  1217. drm_ht_remove_item(&mm->offset_hash, &list->hash);
  1218. drm_mm_put_block(list->file_offset_node);
  1219. kfree(list->map);
  1220. list->map = NULL;
  1221. }
  1222. static uint32_t
  1223. i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
  1224. {
  1225. struct drm_device *dev = obj->base.dev;
  1226. uint32_t size;
  1227. if (INTEL_INFO(dev)->gen >= 4 ||
  1228. obj->tiling_mode == I915_TILING_NONE)
  1229. return obj->base.size;
  1230. /* Previous chips need a power-of-two fence region when tiling */
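/* e.g. a 1.5MiB tiled object on gen3 ends up with a 2MiB fence region */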
  1231. if (INTEL_INFO(dev)->gen == 3)
  1232. size = 1024*1024;
  1233. else
  1234. size = 512*1024;
  1235. while (size < obj->base.size)
  1236. size <<= 1;
  1237. return size;
  1238. }
  1239. /**
  1240. * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  1241. * @obj: object to check
  1242. *
  1243. * Return the required GTT alignment for an object, taking into account
  1244. * potential fence register mapping.
  1245. */
  1246. static uint32_t
  1247. i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
  1248. {
  1249. struct drm_device *dev = obj->base.dev;
  1250. /*
  1251. * Minimum alignment is 4k (GTT page size), but might be greater
  1252. * if a fence register is needed for the object.
  1253. */
  1254. if (INTEL_INFO(dev)->gen >= 4 ||
  1255. obj->tiling_mode == I915_TILING_NONE)
  1256. return 4096;
  1257. /*
  1258. * Previous chips need to be aligned to the size of the smallest
  1259. * fence register that can contain the object.
  1260. */
  1261. return i915_gem_get_gtt_size(obj);
  1262. }
  1263. /**
  1264. * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
  1265. * unfenced object
  1266. * @obj: object to check
  1267. *
  1268. * Return the required GTT alignment for an object, only taking into account
  1269. * unfenced tiled surface requirements.
  1270. */
  1271. static uint32_t
  1272. i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
  1273. {
  1274. struct drm_device *dev = obj->base.dev;
  1275. int tile_height;
  1276. /*
  1277. * Minimum alignment is 4k (GTT page size) for sane hw.
  1278. */
  1279. if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
  1280. obj->tiling_mode == I915_TILING_NONE)
  1281. return 4096;
  1282. /*
  1283. * Older chips need unfenced tiled buffers to be aligned to the left
  1284. * edge of an even tile row (where tile rows are counted as if the bo is
  1285. * placed in a fenced gtt region).
  1286. */
  1287. if (IS_GEN2(dev) ||
  1288. (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
  1289. tile_height = 32;
  1290. else
  1291. tile_height = 8;
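/* e.g. an X-tiled bo with a 2048 byte stride needs 8 * 2048 * 2 = 32KiB alignment here */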
  1292. return tile_height * obj->stride * 2;
  1293. }
  1294. /**
  1295. * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  1296. * @dev: DRM device
  1297. * @data: GTT mapping ioctl data
  1298. * @file: GEM object info
  1299. *
  1300. * Simply returns the fake offset to userspace so it can mmap it.
  1301. * The mmap call will end up in drm_gem_mmap(), which will set things
  1302. * up so we can get faults in the handler above.
  1303. *
  1304. * The fault handler will take care of binding the object into the GTT
  1305. * (since it may have been evicted to make room for something), allocating
  1306. * a fence register, and mapping the appropriate aperture address into
  1307. * userspace.
  1308. */
  1309. int
  1310. i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
  1311. struct drm_file *file)
  1312. {
  1313. struct drm_i915_private *dev_priv = dev->dev_private;
  1314. struct drm_i915_gem_mmap_gtt *args = data;
  1315. struct drm_i915_gem_object *obj;
  1316. int ret;
  1317. if (!(dev->driver->driver_features & DRIVER_GEM))
  1318. return -ENODEV;
  1319. ret = i915_mutex_lock_interruptible(dev);
  1320. if (ret)
  1321. return ret;
  1322. obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  1323. if (obj == NULL) {
  1324. ret = -ENOENT;
  1325. goto unlock;
  1326. }
  1327. if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
  1328. ret = -E2BIG;
  1329. goto unlock;
  1330. }
  1331. if (obj->madv != I915_MADV_WILLNEED) {
  1332. DRM_ERROR("Attempting to mmap a purgeable buffer\n");
  1333. ret = -EINVAL;
  1334. goto out;
  1335. }
  1336. if (!obj->base.map_list.map) {
  1337. ret = i915_gem_create_mmap_offset(obj);
  1338. if (ret)
  1339. goto out;
  1340. }
  1341. args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
  1342. out:
  1343. drm_gem_object_unreference(&obj->base);
  1344. unlock:
  1345. mutex_unlock(&dev->struct_mutex);
  1346. return ret;
  1347. }
  1348. static int
  1349. i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
  1350. gfp_t gfpmask)
  1351. {
  1352. int page_count, i;
  1353. struct address_space *mapping;
  1354. struct inode *inode;
  1355. struct page *page;
  1356. /* Get the list of pages out of our struct file. They'll be pinned
  1357. * at this point until we release them.
  1358. */
  1359. page_count = obj->base.size / PAGE_SIZE;
  1360. BUG_ON(obj->pages != NULL);
  1361. obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
  1362. if (obj->pages == NULL)
  1363. return -ENOMEM;
  1364. inode = obj->base.filp->f_path.dentry->d_inode;
  1365. mapping = inode->i_mapping;
  1366. for (i = 0; i < page_count; i++) {
  1367. page = read_cache_page_gfp(mapping, i,
  1368. GFP_HIGHUSER |
  1369. __GFP_COLD |
  1370. __GFP_RECLAIMABLE |
  1371. gfpmask);
  1372. if (IS_ERR(page))
  1373. goto err_pages;
  1374. obj->pages[i] = page;
  1375. }
  1376. if (obj->tiling_mode != I915_TILING_NONE)
  1377. i915_gem_object_do_bit_17_swizzle(obj);
  1378. return 0;
  1379. err_pages:
  1380. while (i--)
  1381. page_cache_release(obj->pages[i]);
  1382. drm_free_large(obj->pages);
  1383. obj->pages = NULL;
  1384. return PTR_ERR(page);
  1385. }
  1386. static void
  1387. i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
  1388. {
  1389. int page_count = obj->base.size / PAGE_SIZE;
  1390. int i;
  1391. BUG_ON(obj->madv == __I915_MADV_PURGED);
  1392. if (obj->tiling_mode != I915_TILING_NONE)
  1393. i915_gem_object_save_bit_17_swizzle(obj);
  1394. if (obj->madv == I915_MADV_DONTNEED)
  1395. obj->dirty = 0;
  1396. for (i = 0; i < page_count; i++) {
  1397. if (obj->dirty)
  1398. set_page_dirty(obj->pages[i]);
  1399. if (obj->madv == I915_MADV_WILLNEED)
  1400. mark_page_accessed(obj->pages[i]);
  1401. page_cache_release(obj->pages[i]);
  1402. }
  1403. obj->dirty = 0;
  1404. drm_free_large(obj->pages);
  1405. obj->pages = NULL;
  1406. }
  1407. static uint32_t
  1408. i915_gem_next_request_seqno(struct drm_device *dev,
  1409. struct intel_ring_buffer *ring)
  1410. {
  1411. drm_i915_private_t *dev_priv = dev->dev_private;
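/* Record the seqno we are about to use as the ring's outstanding lazy
 * request; i915_add_request() will emit it and clear the flag later.
 */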
  1412. return ring->outstanding_lazy_request = dev_priv->next_seqno;
  1413. }
  1414. static void
  1415. i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
  1416. struct intel_ring_buffer *ring)
  1417. {
  1418. struct drm_device *dev = obj->base.dev;
  1419. struct drm_i915_private *dev_priv = dev->dev_private;
  1420. uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
  1421. BUG_ON(ring == NULL);
  1422. obj->ring = ring;
  1423. /* Add a reference if we're newly entering the active list. */
  1424. if (!obj->active) {
  1425. drm_gem_object_reference(&obj->base);
  1426. obj->active = 1;
  1427. }
  1428. /* Move from whatever list we were on to the tail of execution. */
  1429. list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
  1430. list_move_tail(&obj->ring_list, &ring->active_list);
  1431. obj->last_rendering_seqno = seqno;
  1432. if (obj->fenced_gpu_access) {
  1433. struct drm_i915_fence_reg *reg;
  1434. BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
  1435. obj->last_fenced_seqno = seqno;
  1436. obj->last_fenced_ring = ring;
  1437. reg = &dev_priv->fence_regs[obj->fence_reg];
  1438. list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
  1439. }
  1440. }
  1441. static void
  1442. i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
  1443. {
  1444. list_del_init(&obj->ring_list);
  1445. obj->last_rendering_seqno = 0;
  1446. obj->last_fenced_seqno = 0;
  1447. }
  1448. static void
  1449. i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
  1450. {
  1451. struct drm_device *dev = obj->base.dev;
  1452. drm_i915_private_t *dev_priv = dev->dev_private;
  1453. BUG_ON(!obj->active);
  1454. list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
  1455. i915_gem_object_move_off_active(obj);
  1456. }
  1457. static void
  1458. i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
  1459. {
  1460. struct drm_device *dev = obj->base.dev;
  1461. struct drm_i915_private *dev_priv = dev->dev_private;
  1462. if (obj->pin_count != 0)
  1463. list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
  1464. else
  1465. list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
  1466. BUG_ON(!list_empty(&obj->gpu_write_list));
  1467. BUG_ON(!obj->active);
  1468. obj->ring = NULL;
  1469. i915_gem_object_move_off_active(obj);
  1470. obj->fenced_gpu_access = false;
  1471. obj->last_fenced_ring = NULL;
  1472. obj->active = 0;
  1473. drm_gem_object_unreference(&obj->base);
  1474. WARN_ON(i915_verify_lists(dev));
  1475. }
  1476. /* Immediately discard the backing storage */
  1477. static void
  1478. i915_gem_object_truncate(struct drm_i915_gem_object *obj)
  1479. {
  1480. struct inode *inode;
/* Our goal here is to return as much of the memory as
 * possible back to the system, as we are called from OOM.
 * To do this we must instruct the shmfs to drop all of its
 * backing pages, *now*. Here we mirror the actions taken
 * by shmem_delete_inode() to release the backing store.
 */
  1487. inode = obj->base.filp->f_path.dentry->d_inode;
  1488. truncate_inode_pages(inode->i_mapping, 0);
  1489. if (inode->i_op->truncate_range)
  1490. inode->i_op->truncate_range(inode, 0, (loff_t)-1);
  1491. obj->madv = __I915_MADV_PURGED;
  1492. }
  1493. static inline int
  1494. i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
  1495. {
  1496. return obj->madv == I915_MADV_DONTNEED;
  1497. }
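/* Any object on the ring's gpu_write_list whose pending write is covered
 * by flush_domains has now had its flush queued, so clear its write
 * domain and move it onto the active list.
 */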
  1498. static void
  1499. i915_gem_process_flushing_list(struct drm_device *dev,
  1500. uint32_t flush_domains,
  1501. struct intel_ring_buffer *ring)
  1502. {
  1503. struct drm_i915_gem_object *obj, *next;
  1504. list_for_each_entry_safe(obj, next,
  1505. &ring->gpu_write_list,
  1506. gpu_write_list) {
  1507. if (obj->base.write_domain & flush_domains) {
  1508. uint32_t old_write_domain = obj->base.write_domain;
  1509. obj->base.write_domain = 0;
  1510. list_del_init(&obj->gpu_write_list);
  1511. i915_gem_object_move_to_active(obj, ring);
  1512. trace_i915_gem_object_change_domain(obj,
  1513. obj->base.read_domains,
  1514. old_write_domain);
  1515. }
  1516. }
  1517. }
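/**
 * i915_add_request - queue a new request on a ring
 *
 * Emits the request onto @ring, records its seqno, associates it with
 * @file (if any) so per-file requests can be tracked, arms the hangcheck
 * timer, and kicks the retire worker if the ring was previously idle.
 */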
  1518. int
  1519. i915_add_request(struct drm_device *dev,
  1520. struct drm_file *file,
  1521. struct drm_i915_gem_request *request,
  1522. struct intel_ring_buffer *ring)
  1523. {
  1524. drm_i915_private_t *dev_priv = dev->dev_private;
  1525. struct drm_i915_file_private *file_priv = NULL;
  1526. uint32_t seqno;
  1527. int was_empty;
  1528. int ret;
  1529. BUG_ON(request == NULL);
  1530. if (file != NULL)
  1531. file_priv = file->driver_priv;
  1532. ret = ring->add_request(ring, &seqno);
  1533. if (ret)
  1534. return ret;
ring->outstanding_lazy_request = 0;
  1536. request->seqno = seqno;
  1537. request->ring = ring;
  1538. request->emitted_jiffies = jiffies;
  1539. was_empty = list_empty(&ring->request_list);
  1540. list_add_tail(&request->list, &ring->request_list);
  1541. if (file_priv) {
  1542. spin_lock(&file_priv->mm.lock);
  1543. request->file_priv = file_priv;
  1544. list_add_tail(&request->client_list,
  1545. &file_priv->mm.request_list);
  1546. spin_unlock(&file_priv->mm.lock);
  1547. }
  1548. if (!dev_priv->mm.suspended) {
  1549. mod_timer(&dev_priv->hangcheck_timer,
  1550. jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
  1551. if (was_empty)
  1552. queue_delayed_work(dev_priv->wq,
  1553. &dev_priv->mm.retire_work, HZ);
  1554. }
  1555. return 0;
  1556. }
  1557. /**
  1558. * Command execution barrier
  1559. *
  1560. * Ensures that all commands in the ring are finished
  1561. * before signalling the CPU
  1562. */
  1563. static void
  1564. i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
  1565. {
  1566. uint32_t flush_domains = 0;
  1567. /* The sampler always gets flushed on i965 (sigh) */
  1568. if (INTEL_INFO(dev)->gen >= 4)
  1569. flush_domains |= I915_GEM_DOMAIN_SAMPLER;
  1570. ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
  1571. }
  1572. static inline void
  1573. i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
  1574. {
  1575. struct drm_i915_file_private *file_priv = request->file_priv;
  1576. if (!file_priv)
  1577. return;
  1578. spin_lock(&file_priv->mm.lock);
  1579. list_del(&request->client_list);
  1580. request->file_priv = NULL;
  1581. spin_unlock(&file_priv->mm.lock);
  1582. }
  1583. static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
  1584. struct intel_ring_buffer *ring)
  1585. {
  1586. while (!list_empty(&ring->request_list)) {
  1587. struct drm_i915_gem_request *request;
  1588. request = list_first_entry(&ring->request_list,
  1589. struct drm_i915_gem_request,
  1590. list);
  1591. list_del(&request->list);
  1592. i915_gem_request_remove_from_client(request);
  1593. kfree(request);
  1594. }
  1595. while (!list_empty(&ring->active_list)) {
  1596. struct drm_i915_gem_object *obj;
  1597. obj = list_first_entry(&ring->active_list,
  1598. struct drm_i915_gem_object,
  1599. ring_list);
  1600. obj->base.write_domain = 0;
  1601. list_del_init(&obj->gpu_write_list);
  1602. i915_gem_object_move_to_inactive(obj);
  1603. }
  1604. }
  1605. static void i915_gem_reset_fences(struct drm_device *dev)
  1606. {
  1607. struct drm_i915_private *dev_priv = dev->dev_private;
  1608. int i;
  1609. for (i = 0; i < 16; i++) {
  1610. struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
  1611. if (reg->obj)
  1612. i915_gem_clear_fence_reg(reg->obj);
  1613. }
  1614. }
  1615. void i915_gem_reset(struct drm_device *dev)
  1616. {
  1617. struct drm_i915_private *dev_priv = dev->dev_private;
  1618. struct drm_i915_gem_object *obj;
  1619. i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
  1620. i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
  1621. i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
  1622. /* Remove anything from the flushing lists. The GPU cache is likely
  1623. * to be lost on reset along with the data, so simply move the
  1624. * lost bo to the inactive list.
  1625. */
  1626. while (!list_empty(&dev_priv->mm.flushing_list)) {
obj = list_first_entry(&dev_priv->mm.flushing_list,
  1628. struct drm_i915_gem_object,
  1629. mm_list);
  1630. obj->base.write_domain = 0;
  1631. list_del_init(&obj->gpu_write_list);
  1632. i915_gem_object_move_to_inactive(obj);
  1633. }
  1634. /* Move everything out of the GPU domains to ensure we do any
  1635. * necessary invalidation upon reuse.
  1636. */
  1637. list_for_each_entry(obj,
  1638. &dev_priv->mm.inactive_list,
  1639. mm_list)
  1640. {
  1641. obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
  1642. }
  1643. /* The fence registers are invalidated so clear them out */
  1644. i915_gem_reset_fences(dev);
  1645. }
  1646. /**
  1647. * This function clears the request list as sequence numbers are passed.
  1648. */
  1649. static void
  1650. i915_gem_retire_requests_ring(struct drm_device *dev,
  1651. struct intel_ring_buffer *ring)
  1652. {
  1653. drm_i915_private_t *dev_priv = dev->dev_private;
  1654. uint32_t seqno;
  1655. if (!ring->status_page.page_addr ||
  1656. list_empty(&ring->request_list))
  1657. return;
  1658. WARN_ON(i915_verify_lists(dev));
  1659. seqno = ring->get_seqno(ring);
  1660. while (!list_empty(&ring->request_list)) {
  1661. struct drm_i915_gem_request *request;
  1662. request = list_first_entry(&ring->request_list,
  1663. struct drm_i915_gem_request,
  1664. list);
  1665. if (!i915_seqno_passed(seqno, request->seqno))
  1666. break;
  1667. trace_i915_gem_request_retire(dev, request->seqno);
  1668. list_del(&request->list);
  1669. i915_gem_request_remove_from_client(request);
  1670. kfree(request);
  1671. }
  1672. /* Move any buffers on the active list that are no longer referenced
  1673. * by the ringbuffer to the flushing/inactive lists as appropriate.
  1674. */
  1675. while (!list_empty(&ring->active_list)) {
  1676. struct drm_i915_gem_object *obj;
obj = list_first_entry(&ring->active_list,
  1678. struct drm_i915_gem_object,
  1679. ring_list);
  1680. if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
  1681. break;
  1682. if (obj->base.write_domain != 0)
  1683. i915_gem_object_move_to_flushing(obj);
  1684. else
  1685. i915_gem_object_move_to_inactive(obj);
  1686. }
if (unlikely(dev_priv->trace_irq_seqno &&
  1688. i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
  1689. ring->user_irq_put(ring);
  1690. dev_priv->trace_irq_seqno = 0;
  1691. }
  1692. WARN_ON(i915_verify_lists(dev));
  1693. }
  1694. void
  1695. i915_gem_retire_requests(struct drm_device *dev)
  1696. {
  1697. drm_i915_private_t *dev_priv = dev->dev_private;
  1698. if (!list_empty(&dev_priv->mm.deferred_free_list)) {
  1699. struct drm_i915_gem_object *obj, *next;
  1700. /* We must be careful that during unbind() we do not
  1701. * accidentally infinitely recurse into retire requests.
  1702. * Currently:
  1703. * retire -> free -> unbind -> wait -> retire_ring
  1704. */
  1705. list_for_each_entry_safe(obj, next,
  1706. &dev_priv->mm.deferred_free_list,
  1707. mm_list)
  1708. i915_gem_free_object_tail(obj);
  1709. }
  1710. i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
  1711. i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
  1712. i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
  1713. }
  1714. static void
  1715. i915_gem_retire_work_handler(struct work_struct *work)
  1716. {
  1717. drm_i915_private_t *dev_priv;
  1718. struct drm_device *dev;
  1719. dev_priv = container_of(work, drm_i915_private_t,
  1720. mm.retire_work.work);
  1721. dev = dev_priv->dev;
  1722. /* Come back later if the device is busy... */
  1723. if (!mutex_trylock(&dev->struct_mutex)) {
  1724. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
  1725. return;
  1726. }
  1727. i915_gem_retire_requests(dev);
  1728. if (!dev_priv->mm.suspended &&
  1729. (!list_empty(&dev_priv->render_ring.request_list) ||
  1730. !list_empty(&dev_priv->bsd_ring.request_list) ||
  1731. !list_empty(&dev_priv->blt_ring.request_list)))
  1732. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
  1733. mutex_unlock(&dev->struct_mutex);
  1734. }
  1735. int
  1736. i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
  1737. bool interruptible, struct intel_ring_buffer *ring)
  1738. {
  1739. drm_i915_private_t *dev_priv = dev->dev_private;
  1740. u32 ier;
  1741. int ret = 0;
  1742. BUG_ON(seqno == 0);
  1743. if (atomic_read(&dev_priv->mm.wedged))
  1744. return -EAGAIN;
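/* If the seqno is only the ring's lazily reserved request, it has not
 * been emitted to the hardware yet; add it now so there is something
 * real to wait upon.
 */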
  1745. if (seqno == ring->outstanding_lazy_request) {
  1746. struct drm_i915_gem_request *request;
  1747. request = kzalloc(sizeof(*request), GFP_KERNEL);
  1748. if (request == NULL)
  1749. return -ENOMEM;
  1750. ret = i915_add_request(dev, NULL, request, ring);
  1751. if (ret) {
  1752. kfree(request);
  1753. return ret;
  1754. }
  1755. seqno = request->seqno;
  1756. }
  1757. if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
  1758. if (HAS_PCH_SPLIT(dev))
  1759. ier = I915_READ(DEIER) | I915_READ(GTIER);
  1760. else
  1761. ier = I915_READ(IER);
  1762. if (!ier) {
  1763. DRM_ERROR("something (likely vbetool) disabled "
  1764. "interrupts, re-enabling\n");
  1765. i915_driver_irq_preinstall(dev);
  1766. i915_driver_irq_postinstall(dev);
  1767. }
  1768. trace_i915_gem_request_wait_begin(dev, seqno);
  1769. ring->waiting_seqno = seqno;
  1770. ring->user_irq_get(ring);
  1771. if (interruptible)
  1772. ret = wait_event_interruptible(ring->irq_queue,
  1773. i915_seqno_passed(ring->get_seqno(ring), seqno)
  1774. || atomic_read(&dev_priv->mm.wedged));
  1775. else
  1776. wait_event(ring->irq_queue,
  1777. i915_seqno_passed(ring->get_seqno(ring), seqno)
  1778. || atomic_read(&dev_priv->mm.wedged));
  1779. ring->user_irq_put(ring);
  1780. ring->waiting_seqno = 0;
  1781. trace_i915_gem_request_wait_end(dev, seqno);
  1782. }
  1783. if (atomic_read(&dev_priv->mm.wedged))
  1784. ret = -EAGAIN;
  1785. if (ret && ret != -ERESTARTSYS)
  1786. DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
  1787. __func__, ret, seqno, ring->get_seqno(ring),
  1788. dev_priv->next_seqno);
  1789. /* Directly dispatch request retiring. While we have the work queue
  1790. * to handle this, the waiter on a request often wants an associated
  1791. * buffer to have made it to the inactive list, and we would need
  1792. * a separate wait queue to handle that.
  1793. */
  1794. if (ret == 0)
  1795. i915_gem_retire_requests_ring(dev, ring);
  1796. return ret;
  1797. }
  1798. /**
  1799. * Waits for a sequence number to be signaled, and cleans up the
  1800. * request and object lists appropriately for that event.
  1801. */
  1802. static int
  1803. i915_wait_request(struct drm_device *dev, uint32_t seqno,
  1804. struct intel_ring_buffer *ring)
  1805. {
  1806. return i915_do_wait_request(dev, seqno, 1, ring);
  1807. }
  1808. static void
  1809. i915_gem_flush_ring(struct drm_device *dev,
  1810. struct intel_ring_buffer *ring,
  1811. uint32_t invalidate_domains,
  1812. uint32_t flush_domains)
  1813. {
  1814. ring->flush(ring, invalidate_domains, flush_domains);
  1815. i915_gem_process_flushing_list(dev, flush_domains, ring);
  1816. }
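/* Flush the chipset cache for CPU writes and/or the requested GPU rings
 * according to the given invalidate/flush domain masks.
 */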
  1817. static void
  1818. i915_gem_flush(struct drm_device *dev,
  1819. uint32_t invalidate_domains,
  1820. uint32_t flush_domains,
  1821. uint32_t flush_rings)
  1822. {
  1823. drm_i915_private_t *dev_priv = dev->dev_private;
  1824. if (flush_domains & I915_GEM_DOMAIN_CPU)
  1825. intel_gtt_chipset_flush();
  1826. if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
  1827. if (flush_rings & RING_RENDER)
  1828. i915_gem_flush_ring(dev, &dev_priv->render_ring,
  1829. invalidate_domains, flush_domains);
  1830. if (flush_rings & RING_BSD)
  1831. i915_gem_flush_ring(dev, &dev_priv->bsd_ring,
  1832. invalidate_domains, flush_domains);
  1833. if (flush_rings & RING_BLT)
  1834. i915_gem_flush_ring(dev, &dev_priv->blt_ring,
  1835. invalidate_domains, flush_domains);
  1836. }
  1837. }
  1838. /**
  1839. * Ensures that all rendering to the object has completed and the object is
  1840. * safe to unbind from the GTT or access from the CPU.
  1841. */
  1842. static int
  1843. i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  1844. bool interruptible)
  1845. {
  1846. struct drm_device *dev = obj->base.dev;
  1847. int ret;
  1848. /* This function only exists to support waiting for existing rendering,
  1849. * not for emitting required flushes.
  1850. */
  1851. BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
  1852. /* If there is rendering queued on the buffer being evicted, wait for
  1853. * it.
  1854. */
  1855. if (obj->active) {
  1856. ret = i915_do_wait_request(dev,
  1857. obj->last_rendering_seqno,
  1858. interruptible,
  1859. obj->ring);
  1860. if (ret)
  1861. return ret;
  1862. }
  1863. return 0;
  1864. }
  1865. /**
  1866. * Unbinds an object from the GTT aperture.
  1867. */
  1868. int
  1869. i915_gem_object_unbind(struct drm_i915_gem_object *obj)
  1870. {
  1871. struct drm_device *dev = obj->base.dev;
  1872. struct drm_i915_private *dev_priv = dev->dev_private;
  1873. int ret = 0;
  1874. if (obj->gtt_space == NULL)
  1875. return 0;
  1876. if (obj->pin_count != 0) {
  1877. DRM_ERROR("Attempting to unbind pinned buffer\n");
  1878. return -EINVAL;
  1879. }
  1880. /* blow away mappings if mapped through GTT */
  1881. i915_gem_release_mmap(obj);
  1882. /* Move the object to the CPU domain to ensure that
  1883. * any possible CPU writes while it's not in the GTT
  1884. * are flushed when we go to remap it. This will
  1885. * also ensure that all pending GPU writes are finished
  1886. * before we unbind.
  1887. */
  1888. ret = i915_gem_object_set_to_cpu_domain(obj, 1);
  1889. if (ret == -ERESTARTSYS)
  1890. return ret;
  1891. /* Continue on if we fail due to EIO, the GPU is hung so we
* should be safe, and we need to clean up or else we might
  1893. * cause memory corruption through use-after-free.
  1894. */
  1895. if (ret) {
  1896. i915_gem_clflush_object(obj);
  1897. obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  1898. }
  1899. /* release the fence reg _after_ flushing */
  1900. if (obj->fence_reg != I915_FENCE_REG_NONE)
  1901. i915_gem_clear_fence_reg(obj);
  1902. i915_gem_gtt_unbind_object(obj);
  1903. i915_gem_object_put_pages_gtt(obj);
  1904. i915_gem_info_remove_gtt(dev_priv, obj);
  1905. list_del_init(&obj->mm_list);
  1906. /* Avoid an unnecessary call to unbind on rebind. */
  1907. obj->map_and_fenceable = true;
  1908. drm_mm_put_block(obj->gtt_space);
  1909. obj->gtt_space = NULL;
  1910. obj->gtt_offset = 0;
  1911. if (i915_gem_object_is_purgeable(obj))
  1912. i915_gem_object_truncate(obj);
  1913. trace_i915_gem_object_unbind(obj);
  1914. return ret;
  1915. }
  1916. static int i915_ring_idle(struct drm_device *dev,
  1917. struct intel_ring_buffer *ring)
  1918. {
  1919. if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
  1920. return 0;
  1921. i915_gem_flush_ring(dev, ring,
  1922. I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
  1923. return i915_wait_request(dev,
  1924. i915_gem_next_request_seqno(dev, ring),
  1925. ring);
  1926. }
  1927. int
  1928. i915_gpu_idle(struct drm_device *dev)
  1929. {
  1930. drm_i915_private_t *dev_priv = dev->dev_private;
  1931. bool lists_empty;
  1932. int ret;
  1933. lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
  1934. list_empty(&dev_priv->mm.active_list));
  1935. if (lists_empty)
  1936. return 0;
  1937. /* Flush everything onto the inactive list. */
  1938. ret = i915_ring_idle(dev, &dev_priv->render_ring);
  1939. if (ret)
  1940. return ret;
  1941. ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
  1942. if (ret)
  1943. return ret;
  1944. ret = i915_ring_idle(dev, &dev_priv->blt_ring);
  1945. if (ret)
  1946. return ret;
  1947. return 0;
  1948. }
  1949. static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
  1950. struct intel_ring_buffer *pipelined)
  1951. {
  1952. struct drm_device *dev = obj->base.dev;
  1953. drm_i915_private_t *dev_priv = dev->dev_private;
  1954. u32 size = obj->gtt_space->size;
  1955. int regnum = obj->fence_reg;
  1956. uint64_t val;
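/* 64-bit fence: the upper 32 bits hold the page-aligned end address,
 * the lower 32 bits the start address, plus pitch, tiling and valid bits.
 */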
  1957. val = (uint64_t)((obj->gtt_offset + size - 4096) &
  1958. 0xfffff000) << 32;
  1959. val |= obj->gtt_offset & 0xfffff000;
  1960. val |= (uint64_t)((obj->stride / 128) - 1) <<
  1961. SANDYBRIDGE_FENCE_PITCH_SHIFT;
  1962. if (obj->tiling_mode == I915_TILING_Y)
  1963. val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  1964. val |= I965_FENCE_REG_VALID;
  1965. if (pipelined) {
  1966. int ret = intel_ring_begin(pipelined, 6);
  1967. if (ret)
  1968. return ret;
  1969. intel_ring_emit(pipelined, MI_NOOP);
  1970. intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
  1971. intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
  1972. intel_ring_emit(pipelined, (u32)val);
  1973. intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
  1974. intel_ring_emit(pipelined, (u32)(val >> 32));
  1975. intel_ring_advance(pipelined);
  1976. } else
  1977. I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
  1978. return 0;
  1979. }
  1980. static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
  1981. struct intel_ring_buffer *pipelined)
  1982. {
  1983. struct drm_device *dev = obj->base.dev;
  1984. drm_i915_private_t *dev_priv = dev->dev_private;
  1985. u32 size = obj->gtt_space->size;
  1986. int regnum = obj->fence_reg;
  1987. uint64_t val;
  1988. val = (uint64_t)((obj->gtt_offset + size - 4096) &
  1989. 0xfffff000) << 32;
  1990. val |= obj->gtt_offset & 0xfffff000;
  1991. val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
  1992. if (obj->tiling_mode == I915_TILING_Y)
  1993. val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  1994. val |= I965_FENCE_REG_VALID;
  1995. if (pipelined) {
  1996. int ret = intel_ring_begin(pipelined, 6);
  1997. if (ret)
  1998. return ret;
  1999. intel_ring_emit(pipelined, MI_NOOP);
  2000. intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
  2001. intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
  2002. intel_ring_emit(pipelined, (u32)val);
  2003. intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
  2004. intel_ring_emit(pipelined, (u32)(val >> 32));
  2005. intel_ring_advance(pipelined);
  2006. } else
  2007. I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
  2008. return 0;
  2009. }
  2010. static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
  2011. struct intel_ring_buffer *pipelined)
  2012. {
  2013. struct drm_device *dev = obj->base.dev;
  2014. drm_i915_private_t *dev_priv = dev->dev_private;
  2015. u32 size = obj->gtt_space->size;
  2016. u32 fence_reg, val, pitch_val;
  2017. int tile_width;
  2018. if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
  2019. (size & -size) != size ||
  2020. (obj->gtt_offset & (size - 1)),
  2021. "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
  2022. obj->gtt_offset, obj->map_and_fenceable, size))
  2023. return -EINVAL;
  2024. if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
  2025. tile_width = 128;
  2026. else
  2027. tile_width = 512;
  2028. /* Note: pitch better be a power of two tile widths */
  2029. pitch_val = obj->stride / tile_width;
  2030. pitch_val = ffs(pitch_val) - 1;
  2031. val = obj->gtt_offset;
  2032. if (obj->tiling_mode == I915_TILING_Y)
  2033. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2034. val |= I915_FENCE_SIZE_BITS(size);
  2035. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2036. val |= I830_FENCE_REG_VALID;
  2037. fence_reg = obj->fence_reg;
  2038. if (fence_reg < 8)
  2039. fence_reg = FENCE_REG_830_0 + fence_reg * 4;
  2040. else
  2041. fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
  2042. if (pipelined) {
  2043. int ret = intel_ring_begin(pipelined, 4);
  2044. if (ret)
  2045. return ret;
  2046. intel_ring_emit(pipelined, MI_NOOP);
  2047. intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
  2048. intel_ring_emit(pipelined, fence_reg);
  2049. intel_ring_emit(pipelined, val);
  2050. intel_ring_advance(pipelined);
  2051. } else
  2052. I915_WRITE(fence_reg, val);
  2053. return 0;
  2054. }
  2055. static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
  2056. struct intel_ring_buffer *pipelined)
  2057. {
  2058. struct drm_device *dev = obj->base.dev;
  2059. drm_i915_private_t *dev_priv = dev->dev_private;
  2060. u32 size = obj->gtt_space->size;
  2061. int regnum = obj->fence_reg;
  2062. uint32_t val;
  2063. uint32_t pitch_val;
  2064. if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
  2065. (size & -size) != size ||
  2066. (obj->gtt_offset & (size - 1)),
  2067. "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
  2068. obj->gtt_offset, size))
  2069. return -EINVAL;
  2070. pitch_val = obj->stride / 128;
  2071. pitch_val = ffs(pitch_val) - 1;
  2072. val = obj->gtt_offset;
  2073. if (obj->tiling_mode == I915_TILING_Y)
  2074. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2075. val |= I830_FENCE_SIZE_BITS(size);
  2076. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2077. val |= I830_FENCE_REG_VALID;
  2078. if (pipelined) {
  2079. int ret = intel_ring_begin(pipelined, 4);
  2080. if (ret)
  2081. return ret;
  2082. intel_ring_emit(pipelined, MI_NOOP);
  2083. intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
  2084. intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
  2085. intel_ring_emit(pipelined, val);
  2086. intel_ring_advance(pipelined);
  2087. } else
  2088. I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
  2089. return 0;
  2090. }
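/* Returns a free fence register index, or steals the least recently used
 * unpinned one, waiting for any outstanding fenced access to complete.
 */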
  2091. static int i915_find_fence_reg(struct drm_device *dev,
  2092. bool interruptible)
  2093. {
  2094. struct drm_i915_private *dev_priv = dev->dev_private;
  2095. struct drm_i915_fence_reg *reg;
  2096. struct drm_i915_gem_object *obj = NULL;
  2097. int i, avail, ret;
  2098. /* First try to find a free reg */
  2099. avail = 0;
  2100. for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
  2101. reg = &dev_priv->fence_regs[i];
  2102. if (!reg->obj)
  2103. return i;
  2104. if (!reg->obj->pin_count)
  2105. avail++;
  2106. }
  2107. if (avail == 0)
  2108. return -ENOSPC;
  2109. /* None available, try to steal one or wait for a user to finish */
  2110. avail = I915_FENCE_REG_NONE;
  2111. list_for_each_entry(reg, &dev_priv->mm.fence_list,
  2112. lru_list) {
  2113. obj = reg->obj;
  2114. if (obj->pin_count)
  2115. continue;
  2116. /* found one! */
  2117. avail = obj->fence_reg;
  2118. break;
  2119. }
  2120. BUG_ON(avail == I915_FENCE_REG_NONE);
  2121. /* We only have a reference on obj from the active list. put_fence_reg
  2122. * might drop that one, causing a use-after-free in it. So hold a
  2123. * private reference to obj like the other callers of put_fence_reg
  2124. * (set_tiling ioctl) do. */
  2125. drm_gem_object_reference(&obj->base);
  2126. ret = i915_gem_object_put_fence_reg(obj, interruptible);
  2127. drm_gem_object_unreference(&obj->base);
  2128. if (ret != 0)
  2129. return ret;
  2130. return avail;
  2131. }
  2132. /**
  2133. * i915_gem_object_get_fence_reg - set up a fence reg for an object
  2134. * @obj: object to map through a fence reg
  2135. *
  2136. * When mapping objects through the GTT, userspace wants to be able to write
  2137. * to them without having to worry about swizzling if the object is tiled.
  2138. *
  2139. * This function walks the fence regs looking for a free one for @obj,
  2140. * stealing one if it can't find any.
  2141. *
  2142. * It then sets up the reg based on the object's properties: address, pitch
  2143. * and tiling format.
  2144. */
  2145. int
  2146. i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
  2147. bool interruptible)
  2148. {
  2149. struct drm_device *dev = obj->base.dev;
  2150. struct drm_i915_private *dev_priv = dev->dev_private;
  2151. struct drm_i915_fence_reg *reg = NULL;
  2152. struct intel_ring_buffer *pipelined = NULL;
  2153. int ret;
  2154. /* Just update our place in the LRU if our fence is getting used. */
  2155. if (obj->fence_reg != I915_FENCE_REG_NONE) {
  2156. reg = &dev_priv->fence_regs[obj->fence_reg];
  2157. list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
  2158. return 0;
  2159. }
  2160. switch (obj->tiling_mode) {
  2161. case I915_TILING_NONE:
  2162. WARN(1, "allocating a fence for non-tiled object?\n");
  2163. break;
  2164. case I915_TILING_X:
  2165. if (!obj->stride)
  2166. return -EINVAL;
  2167. WARN((obj->stride & (512 - 1)),
  2168. "object 0x%08x is X tiled but has non-512B pitch\n",
  2169. obj->gtt_offset);
  2170. break;
  2171. case I915_TILING_Y:
  2172. if (!obj->stride)
  2173. return -EINVAL;
  2174. WARN((obj->stride & (128 - 1)),
  2175. "object 0x%08x is Y tiled but has non-128B pitch\n",
  2176. obj->gtt_offset);
  2177. break;
  2178. }
  2179. ret = i915_find_fence_reg(dev, interruptible);
  2180. if (ret < 0)
  2181. return ret;
  2182. obj->fence_reg = ret;
  2183. reg = &dev_priv->fence_regs[obj->fence_reg];
  2184. list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
  2185. reg->obj = obj;
  2186. switch (INTEL_INFO(dev)->gen) {
  2187. case 6:
  2188. ret = sandybridge_write_fence_reg(obj, pipelined);
  2189. break;
  2190. case 5:
  2191. case 4:
  2192. ret = i965_write_fence_reg(obj, pipelined);
  2193. break;
  2194. case 3:
  2195. ret = i915_write_fence_reg(obj, pipelined);
  2196. break;
  2197. case 2:
  2198. ret = i830_write_fence_reg(obj, pipelined);
  2199. break;
  2200. }
  2201. trace_i915_gem_object_get_fence(obj,
  2202. obj->fence_reg,
  2203. obj->tiling_mode);
  2204. return ret;
  2205. }
  2206. /**
  2207. * i915_gem_clear_fence_reg - clear out fence register info
  2208. * @obj: object to clear
  2209. *
  2210. * Zeroes out the fence register itself and clears out the associated
  2211. * data structures in dev_priv and obj.
  2212. */
  2213. static void
  2214. i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj)
  2215. {
  2216. struct drm_device *dev = obj->base.dev;
  2217. drm_i915_private_t *dev_priv = dev->dev_private;
  2218. struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[obj->fence_reg];
  2219. uint32_t fence_reg;
  2220. switch (INTEL_INFO(dev)->gen) {
  2221. case 6:
  2222. I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
  2223. (obj->fence_reg * 8), 0);
  2224. break;
  2225. case 5:
  2226. case 4:
  2227. I915_WRITE64(FENCE_REG_965_0 + (obj->fence_reg * 8), 0);
  2228. break;
  2229. case 3:
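/* Gen3 keeps fence registers 8-15 at FENCE_REG_945_8; registers 0-7
 * share the gen2 layout, so the else below deliberately falls through
 * into case 2.
 */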
  2230. if (obj->fence_reg >= 8)
  2231. fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4;
  2232. else
  2233. case 2:
  2234. fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4;
  2235. I915_WRITE(fence_reg, 0);
  2236. break;
  2237. }
  2238. reg->obj = NULL;
  2239. obj->fence_reg = I915_FENCE_REG_NONE;
  2240. list_del_init(&reg->lru_list);
  2241. }
  2242. /**
  2243. * i915_gem_object_put_fence_reg - waits on outstanding fenced access
  2244. * to the buffer to finish, and then resets the fence register.
  2245. * @obj: tiled object holding a fence register.
* @interruptible: whether the wait upon the fence is interruptible
  2247. *
  2248. * Zeroes out the fence register itself and clears out the associated
  2249. * data structures in dev_priv and obj.
  2250. */
  2251. int
  2252. i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
  2253. bool interruptible)
  2254. {
  2255. struct drm_device *dev = obj->base.dev;
  2256. int ret;
  2257. if (obj->fence_reg == I915_FENCE_REG_NONE)
  2258. return 0;
  2259. /* If we've changed tiling, GTT-mappings of the object
  2260. * need to re-fault to ensure that the correct fence register
  2261. * setup is in place.
  2262. */
  2263. i915_gem_release_mmap(obj);
  2264. /* On the i915, GPU access to tiled buffers is via a fence,
  2265. * therefore we must wait for any outstanding access to complete
  2266. * before clearing the fence.
  2267. */
  2268. if (obj->fenced_gpu_access) {
  2269. ret = i915_gem_object_flush_gpu_write_domain(obj, NULL);
  2270. if (ret)
  2271. return ret;
  2272. obj->fenced_gpu_access = false;
  2273. }
  2274. if (obj->last_fenced_seqno) {
  2275. ret = i915_do_wait_request(dev,
  2276. obj->last_fenced_seqno,
  2277. interruptible,
  2278. obj->last_fenced_ring);
  2279. if (ret)
  2280. return ret;
obj->last_fenced_seqno = 0;
  2282. }
  2283. i915_gem_object_flush_gtt_write_domain(obj);
  2284. i915_gem_clear_fence_reg(obj);
  2285. return 0;
  2286. }
  2287. /**
  2288. * Finds free space in the GTT aperture and binds the object there.
  2289. */
  2290. static int
  2291. i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
  2292. unsigned alignment,
  2293. bool map_and_fenceable)
  2294. {
  2295. struct drm_device *dev = obj->base.dev;
  2296. drm_i915_private_t *dev_priv = dev->dev_private;
  2297. struct drm_mm_node *free_space;
  2298. gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
  2299. u32 size, fence_size, fence_alignment, unfenced_alignment;
  2300. bool mappable, fenceable;
  2301. int ret;
  2302. if (obj->madv != I915_MADV_WILLNEED) {
  2303. DRM_ERROR("Attempting to bind a purgeable object\n");
  2304. return -EINVAL;
  2305. }
  2306. fence_size = i915_gem_get_gtt_size(obj);
  2307. fence_alignment = i915_gem_get_gtt_alignment(obj);
  2308. unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
  2309. if (alignment == 0)
  2310. alignment = map_and_fenceable ? fence_alignment :
  2311. unfenced_alignment;
  2312. if (map_and_fenceable && alignment & (fence_alignment - 1)) {
  2313. DRM_ERROR("Invalid object alignment requested %u\n", alignment);
  2314. return -EINVAL;
  2315. }
  2316. size = map_and_fenceable ? fence_size : obj->base.size;
  2317. /* If the object is bigger than the entire aperture, reject it early
  2318. * before evicting everything in a vain attempt to find space.
  2319. */
  2320. if (obj->base.size >
  2321. (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
  2322. DRM_ERROR("Attempting to bind an object larger than the aperture\n");
  2323. return -E2BIG;
  2324. }
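/* Find space for the object; if that fails, evict something and retry,
 * and as a last resort drop the optimistic gfp flags so that fetching
 * the backing pages may reclaim memory from everyone else.
 */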
  2325. search_free:
  2326. if (map_and_fenceable)
  2327. free_space =
  2328. drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
  2329. size, alignment, 0,
  2330. dev_priv->mm.gtt_mappable_end,
  2331. 0);
  2332. else
  2333. free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
  2334. size, alignment, 0);
  2335. if (free_space != NULL) {
  2336. if (map_and_fenceable)
  2337. obj->gtt_space =
  2338. drm_mm_get_block_range_generic(free_space,
  2339. size, alignment, 0,
  2340. dev_priv->mm.gtt_mappable_end,
  2341. 0);
  2342. else
  2343. obj->gtt_space =
  2344. drm_mm_get_block(free_space, size, alignment);
  2345. }
  2346. if (obj->gtt_space == NULL) {
  2347. /* If the gtt is empty and we're still having trouble
  2348. * fitting our object in, we're out of memory.
  2349. */
  2350. ret = i915_gem_evict_something(dev, size, alignment,
  2351. map_and_fenceable);
  2352. if (ret)
  2353. return ret;
  2354. goto search_free;
  2355. }
  2356. ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
  2357. if (ret) {
  2358. drm_mm_put_block(obj->gtt_space);
  2359. obj->gtt_space = NULL;
  2360. if (ret == -ENOMEM) {
  2361. /* first try to clear up some space from the GTT */
  2362. ret = i915_gem_evict_something(dev, size,
  2363. alignment,
  2364. map_and_fenceable);
  2365. if (ret) {
  2366. /* now try to shrink everyone else */
  2367. if (gfpmask) {
  2368. gfpmask = 0;
  2369. goto search_free;
  2370. }
  2371. return ret;
  2372. }
  2373. goto search_free;
  2374. }
  2375. return ret;
  2376. }
  2377. ret = i915_gem_gtt_bind_object(obj);
  2378. if (ret) {
  2379. i915_gem_object_put_pages_gtt(obj);
  2380. drm_mm_put_block(obj->gtt_space);
  2381. obj->gtt_space = NULL;
  2382. ret = i915_gem_evict_something(dev, size,
  2383. alignment, map_and_fenceable);
  2384. if (ret)
  2385. return ret;
  2386. goto search_free;
  2387. }
  2388. obj->gtt_offset = obj->gtt_space->start;
/* keep track of the bound object by adding it to the inactive list */
  2390. list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
  2391. i915_gem_info_add_gtt(dev_priv, obj);
  2392. /* Assert that the object is not currently in any GPU domain. As it
  2393. * wasn't in the GTT, there shouldn't be any way it could have been in
  2394. * a GPU cache
  2395. */
  2396. BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
  2397. BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
  2398. trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
  2399. fenceable =
  2400. obj->gtt_space->size == fence_size &&
(obj->gtt_space->start & (fence_alignment - 1)) == 0;
  2402. mappable =
  2403. obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
  2404. obj->map_and_fenceable = mappable && fenceable;
  2405. return 0;
  2406. }
  2407. void
  2408. i915_gem_clflush_object(struct drm_i915_gem_object *obj)
  2409. {
  2410. /* If we don't have a page list set up, then we're not pinned
  2411. * to GPU, and we can ignore the cache flush because it'll happen
  2412. * again at bind time.
  2413. */
  2414. if (obj->pages == NULL)
  2415. return;
  2416. trace_i915_gem_object_clflush(obj);
  2417. drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
  2418. }
  2419. /** Flushes any GPU write domain for the object if it's dirty. */
  2420. static int
  2421. i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj,
  2422. struct intel_ring_buffer *pipelined)
  2423. {
  2424. struct drm_device *dev = obj->base.dev;
  2425. if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
  2426. return 0;
  2427. /* Queue the GPU write cache flushing we need. */
  2428. i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
  2429. BUG_ON(obj->base.write_domain);
  2430. if (pipelined && pipelined == obj->ring)
  2431. return 0;
  2432. return i915_gem_object_wait_rendering(obj, true);
  2433. }
  2434. /** Flushes the GTT write domain for the object if it's dirty. */
  2435. static void
  2436. i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
  2437. {
  2438. uint32_t old_write_domain;
  2439. if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
  2440. return;
  2441. /* No actual flushing is required for the GTT write domain. Writes
  2442. * to it immediately go to main memory as far as we know, so there's
  2443. * no chipset flush. It also doesn't land in render cache.
  2444. */
  2445. i915_gem_release_mmap(obj);
  2446. old_write_domain = obj->base.write_domain;
  2447. obj->base.write_domain = 0;
  2448. trace_i915_gem_object_change_domain(obj,
  2449. obj->base.read_domains,
  2450. old_write_domain);
  2451. }
  2452. /** Flushes the CPU write domain for the object if it's dirty. */
  2453. static void
  2454. i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
  2455. {
  2456. uint32_t old_write_domain;
  2457. if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
  2458. return;
  2459. i915_gem_clflush_object(obj);
  2460. intel_gtt_chipset_flush();
  2461. old_write_domain = obj->base.write_domain;
  2462. obj->base.write_domain = 0;
  2463. trace_i915_gem_object_change_domain(obj,
  2464. obj->base.read_domains,
  2465. old_write_domain);
  2466. }
  2467. /**
  2468. * Moves a single object to the GTT read, and possibly write domain.
  2469. *
  2470. * This function returns when the move is complete, including waiting on
  2471. * flushes to occur.
  2472. */
  2473. int
  2474. i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
  2475. {
  2476. uint32_t old_write_domain, old_read_domains;
  2477. int ret;
  2478. /* Not valid to be called on unbound objects. */
  2479. if (obj->gtt_space == NULL)
  2480. return -EINVAL;
  2481. ret = i915_gem_object_flush_gpu_write_domain(obj, NULL);
  2482. if (ret != 0)
  2483. return ret;
  2484. i915_gem_object_flush_cpu_write_domain(obj);
  2485. if (write) {
  2486. ret = i915_gem_object_wait_rendering(obj, true);
  2487. if (ret)
  2488. return ret;
  2489. }
  2490. old_write_domain = obj->base.write_domain;
  2491. old_read_domains = obj->base.read_domains;
  2492. /* It should now be out of any other write domains, and we can update
  2493. * the domain values for our changes.
  2494. */
  2495. BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  2496. obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
  2497. if (write) {
  2498. obj->base.read_domains = I915_GEM_DOMAIN_GTT;
  2499. obj->base.write_domain = I915_GEM_DOMAIN_GTT;
  2500. obj->dirty = 1;
  2501. }
  2502. trace_i915_gem_object_change_domain(obj,
  2503. old_read_domains,
  2504. old_write_domain);
  2505. return 0;
  2506. }
  2507. /*
* Prepare buffer for display plane. Use an uninterruptible wait for any
* required flush, as the modesetting process must not be interrupted.
  2510. */
  2511. int
  2512. i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
  2513. struct intel_ring_buffer *pipelined)
  2514. {
  2515. uint32_t old_read_domains;
  2516. int ret;
  2517. /* Not valid to be called on unbound objects. */
  2518. if (obj->gtt_space == NULL)
  2519. return -EINVAL;
  2520. ret = i915_gem_object_flush_gpu_write_domain(obj, pipelined);
  2521. if (ret)
  2522. return ret;
/* Currently, we are always called from a non-interruptible context. */
  2524. if (!pipelined) {
  2525. ret = i915_gem_object_wait_rendering(obj, false);
  2526. if (ret)
  2527. return ret;
  2528. }
  2529. i915_gem_object_flush_cpu_write_domain(obj);
  2530. old_read_domains = obj->base.read_domains;
  2531. obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
  2532. trace_i915_gem_object_change_domain(obj,
  2533. old_read_domains,
  2534. obj->base.write_domain);
  2535. return 0;
  2536. }
  2537. int
  2538. i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
  2539. bool interruptible)
  2540. {
  2541. if (!obj->active)
  2542. return 0;
  2543. if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
  2544. i915_gem_flush_ring(obj->base.dev, obj->ring,
  2545. 0, obj->base.write_domain);
  2546. return i915_gem_object_wait_rendering(obj, interruptible);
  2547. }
  2548. /**
  2549. * Moves a single object to the CPU read, and possibly write domain.
  2550. *
  2551. * This function returns when the move is complete, including waiting on
  2552. * flushes to occur.
  2553. */
  2554. static int
  2555. i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
  2556. {
  2557. uint32_t old_write_domain, old_read_domains;
  2558. int ret;
2559. ret = i915_gem_object_flush_gpu_write_domain(obj, NULL);
  2560. if (ret != 0)
  2561. return ret;
  2562. i915_gem_object_flush_gtt_write_domain(obj);
  2563. /* If we have a partially-valid cache of the object in the CPU,
  2564. * finish invalidating it and free the per-page flags.
  2565. */
  2566. i915_gem_object_set_to_full_cpu_read_domain(obj);
  2567. if (write) {
  2568. ret = i915_gem_object_wait_rendering(obj, true);
  2569. if (ret)
  2570. return ret;
  2571. }
  2572. old_write_domain = obj->base.write_domain;
  2573. old_read_domains = obj->base.read_domains;
  2574. /* Flush the CPU cache if it's still invalid. */
  2575. if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
  2576. i915_gem_clflush_object(obj);
  2577. obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
  2578. }
  2579. /* It should now be out of any other write domains, and we can update
  2580. * the domain values for our changes.
  2581. */
  2582. BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  2583. /* If we're writing through the CPU, then the GPU read domains will
  2584. * need to be invalidated at next use.
  2585. */
  2586. if (write) {
  2587. obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  2588. obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  2589. }
  2590. trace_i915_gem_object_change_domain(obj,
  2591. old_read_domains,
  2592. old_write_domain);
  2593. return 0;
  2594. }
  2595. /*
  2596. * Set the next domain for the specified object. This
2597. * may not actually perform the necessary flushing/invalidating though,
2598. * as that may want to be batched with other set_domain operations.
  2599. *
  2600. * This is (we hope) the only really tricky part of gem. The goal
  2601. * is fairly simple -- track which caches hold bits of the object
  2602. * and make sure they remain coherent. A few concrete examples may
  2603. * help to explain how it works. For shorthand, we use the notation
2604. * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2605. * a pair of read and write domain masks.
  2606. *
  2607. * Case 1: the batch buffer
  2608. *
  2609. * 1. Allocated
  2610. * 2. Written by CPU
  2611. * 3. Mapped to GTT
  2612. * 4. Read by GPU
  2613. * 5. Unmapped from GTT
  2614. * 6. Freed
  2615. *
  2616. * Let's take these a step at a time
  2617. *
  2618. * 1. Allocated
  2619. * Pages allocated from the kernel may still have
  2620. * cache contents, so we set them to (CPU, CPU) always.
  2621. * 2. Written by CPU (using pwrite)
  2622. * The pwrite function calls set_domain (CPU, CPU) and
  2623. * this function does nothing (as nothing changes)
2624. * 3. Mapped to GTT
  2625. * This function asserts that the object is not
  2626. * currently in any GPU-based read or write domains
  2627. * 4. Read by GPU
  2628. * i915_gem_execbuffer calls set_domain (COMMAND, 0).
  2629. * As write_domain is zero, this function adds in the
  2630. * current read domains (CPU+COMMAND, 0).
  2631. * flush_domains is set to CPU.
  2632. * invalidate_domains is set to COMMAND
  2633. * clflush is run to get data out of the CPU caches
  2634. * then i915_dev_set_domain calls i915_gem_flush to
  2635. * emit an MI_FLUSH and drm_agp_chipset_flush
  2636. * 5. Unmapped from GTT
  2637. * i915_gem_object_unbind calls set_domain (CPU, CPU)
  2638. * flush_domains and invalidate_domains end up both zero
  2639. * so no flushing/invalidating happens
  2640. * 6. Freed
  2641. * yay, done
  2642. *
  2643. * Case 2: The shared render buffer
  2644. *
  2645. * 1. Allocated
  2646. * 2. Mapped to GTT
  2647. * 3. Read/written by GPU
  2648. * 4. set_domain to (CPU,CPU)
  2649. * 5. Read/written by CPU
  2650. * 6. Read/written by GPU
  2651. *
  2652. * 1. Allocated
  2653. * Same as last example, (CPU, CPU)
  2654. * 2. Mapped to GTT
  2655. * Nothing changes (assertions find that it is not in the GPU)
  2656. * 3. Read/written by GPU
  2657. * execbuffer calls set_domain (RENDER, RENDER)
  2658. * flush_domains gets CPU
  2659. * invalidate_domains gets GPU
  2660. * clflush (obj)
  2661. * MI_FLUSH and drm_agp_chipset_flush
  2662. * 4. set_domain (CPU, CPU)
  2663. * flush_domains gets GPU
  2664. * invalidate_domains gets CPU
  2665. * wait_rendering (obj) to make sure all drawing is complete.
  2666. * This will include an MI_FLUSH to get the data from GPU
  2667. * to memory
  2668. * clflush (obj) to invalidate the CPU cache
  2669. * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
  2670. * 5. Read/written by CPU
  2671. * cache lines are loaded and dirtied
2672. * 6. Read/written by GPU
  2673. * Same as last GPU access
  2674. *
  2675. * Case 3: The constant buffer
  2676. *
  2677. * 1. Allocated
  2678. * 2. Written by CPU
  2679. * 3. Read by GPU
  2680. * 4. Updated (written) by CPU again
  2681. * 5. Read by GPU
  2682. *
  2683. * 1. Allocated
  2684. * (CPU, CPU)
  2685. * 2. Written by CPU
  2686. * (CPU, CPU)
  2687. * 3. Read by GPU
  2688. * (CPU+RENDER, 0)
  2689. * flush_domains = CPU
  2690. * invalidate_domains = RENDER
  2691. * clflush (obj)
  2692. * MI_FLUSH
  2693. * drm_agp_chipset_flush
  2694. * 4. Updated (written) by CPU again
  2695. * (CPU, CPU)
  2696. * flush_domains = 0 (no previous write domain)
  2697. * invalidate_domains = 0 (no new read domains)
  2698. * 5. Read by GPU
  2699. * (CPU+RENDER, 0)
  2700. * flush_domains = CPU
  2701. * invalidate_domains = RENDER
  2702. * clflush (obj)
  2703. * MI_FLUSH
  2704. * drm_agp_chipset_flush
  2705. */
  2706. static void
  2707. i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
  2708. struct intel_ring_buffer *ring,
  2709. struct change_domains *cd)
  2710. {
  2711. uint32_t invalidate_domains = 0, flush_domains = 0;
  2712. /*
  2713. * If the object isn't moving to a new write domain,
  2714. * let the object stay in multiple read domains
  2715. */
  2716. if (obj->base.pending_write_domain == 0)
  2717. obj->base.pending_read_domains |= obj->base.read_domains;
  2718. /*
  2719. * Flush the current write domain if
  2720. * the new read domains don't match. Invalidate
  2721. * any read domains which differ from the old
  2722. * write domain
  2723. */
  2724. if (obj->base.write_domain &&
  2725. (((obj->base.write_domain != obj->base.pending_read_domains ||
  2726. obj->ring != ring)) ||
  2727. (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
  2728. flush_domains |= obj->base.write_domain;
  2729. invalidate_domains |=
  2730. obj->base.pending_read_domains & ~obj->base.write_domain;
  2731. }
  2732. /*
  2733. * Invalidate any read caches which may have
  2734. * stale data. That is, any new read domains.
  2735. */
  2736. invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
  2737. if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
  2738. i915_gem_clflush_object(obj);
  2739. /* blow away mappings if mapped through GTT */
  2740. if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
  2741. i915_gem_release_mmap(obj);
  2742. /* The actual obj->write_domain will be updated with
  2743. * pending_write_domain after we emit the accumulated flush for all
  2744. * of our domain changes in execbuffers (which clears objects'
  2745. * write_domains). So if we have a current write domain that we
  2746. * aren't changing, set pending_write_domain to that.
  2747. */
  2748. if (flush_domains == 0 && obj->base.pending_write_domain == 0)
  2749. obj->base.pending_write_domain = obj->base.write_domain;
  2750. cd->invalidate_domains |= invalidate_domains;
  2751. cd->flush_domains |= flush_domains;
  2752. if (flush_domains & I915_GEM_GPU_DOMAINS)
  2753. cd->flush_rings |= obj->ring->id;
  2754. if (invalidate_domains & I915_GEM_GPU_DOMAINS)
  2755. cd->flush_rings |= ring->id;
  2756. }
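/*
 * Note that this only accumulates the required work in *cd; the single
 * combined flush is emitted later for the whole batch, e.g.:
 *
 *	if (cd.invalidate_domains | cd.flush_domains)
 *		i915_gem_flush(dev, cd.invalidate_domains,
 *			       cd.flush_domains, cd.flush_rings);
 *
 * as done in i915_gem_execbuffer_move_to_gpu() below.
 */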
  2757. /**
2758. * Moves the object from a partially-valid CPU read domain to a fully-valid one.
  2759. *
  2760. * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
  2761. * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  2762. */
  2763. static void
  2764. i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
  2765. {
  2766. if (!obj->page_cpu_valid)
  2767. return;
  2768. /* If we're partially in the CPU read domain, finish moving it in.
  2769. */
  2770. if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
  2771. int i;
  2772. for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
  2773. if (obj->page_cpu_valid[i])
  2774. continue;
  2775. drm_clflush_pages(obj->pages + i, 1);
  2776. }
  2777. }
  2778. /* Free the page_cpu_valid mappings which are now stale, whether
  2779. * or not we've got I915_GEM_DOMAIN_CPU.
  2780. */
  2781. kfree(obj->page_cpu_valid);
  2782. obj->page_cpu_valid = NULL;
  2783. }
  2784. /**
  2785. * Set the CPU read domain on a range of the object.
  2786. *
  2787. * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2788. * not entirely valid. The page_cpu_valid member of the object records which
2789. * pages have been flushed, and will be respected by
  2790. * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
  2791. * of the whole object.
  2792. *
  2793. * This function returns when the move is complete, including waiting on
  2794. * flushes to occur.
  2795. */
  2796. static int
  2797. i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
  2798. uint64_t offset, uint64_t size)
  2799. {
  2800. uint32_t old_read_domains;
  2801. int i, ret;
  2802. if (offset == 0 && size == obj->base.size)
  2803. return i915_gem_object_set_to_cpu_domain(obj, 0);
2804. ret = i915_gem_object_flush_gpu_write_domain(obj, NULL);
  2805. if (ret != 0)
  2806. return ret;
  2807. i915_gem_object_flush_gtt_write_domain(obj);
  2808. /* If we're already fully in the CPU read domain, we're done. */
  2809. if (obj->page_cpu_valid == NULL &&
  2810. (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
  2811. return 0;
  2812. /* Otherwise, create/clear the per-page CPU read domain flag if we're
  2813. * newly adding I915_GEM_DOMAIN_CPU
  2814. */
  2815. if (obj->page_cpu_valid == NULL) {
  2816. obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
  2817. GFP_KERNEL);
  2818. if (obj->page_cpu_valid == NULL)
  2819. return -ENOMEM;
  2820. } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
  2821. memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
  2822. /* Flush the cache on any pages that are still invalid from the CPU's
  2823. * perspective.
  2824. */
  2825. for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
  2826. i++) {
  2827. if (obj->page_cpu_valid[i])
  2828. continue;
  2829. drm_clflush_pages(obj->pages + i, 1);
  2830. obj->page_cpu_valid[i] = 1;
  2831. }
  2832. /* It should now be out of any other write domains, and we can update
  2833. * the domain values for our changes.
  2834. */
  2835. BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  2836. old_read_domains = obj->base.read_domains;
  2837. obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
  2838. trace_i915_gem_object_change_domain(obj,
  2839. old_read_domains,
  2840. obj->base.write_domain);
  2841. return 0;
  2842. }
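/*
 * Example of the partial path (assuming 4 KiB pages): a 4 KiB pread at
 * offset 8192 touches only page 2 of the object, so only that page is
 * clflushed and marked in page_cpu_valid[]. A later move to the full CPU
 * domain via i915_gem_object_set_to_full_cpu_read_domain() then skips the
 * pages already marked valid.
 */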
  2843. static int
  2844. i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
  2845. struct drm_file *file_priv,
  2846. struct drm_i915_gem_exec_object2 *entry,
  2847. struct drm_i915_gem_relocation_entry *reloc)
  2848. {
  2849. struct drm_device *dev = obj->base.dev;
  2850. struct drm_gem_object *target_obj;
  2851. uint32_t target_offset;
  2852. int ret = -EINVAL;
  2853. target_obj = drm_gem_object_lookup(dev, file_priv,
  2854. reloc->target_handle);
  2855. if (target_obj == NULL)
  2856. return -ENOENT;
  2857. target_offset = to_intel_bo(target_obj)->gtt_offset;
  2858. #if WATCH_RELOC
  2859. DRM_INFO("%s: obj %p offset %08x target %d "
  2860. "read %08x write %08x gtt %08x "
  2861. "presumed %08x delta %08x\n",
  2862. __func__,
  2863. obj,
  2864. (int) reloc->offset,
  2865. (int) reloc->target_handle,
  2866. (int) reloc->read_domains,
  2867. (int) reloc->write_domain,
  2868. (int) target_offset,
  2869. (int) reloc->presumed_offset,
  2870. reloc->delta);
  2871. #endif
  2872. /* The target buffer should have appeared before us in the
  2873. * exec_object list, so it should have a GTT space bound by now.
  2874. */
  2875. if (target_offset == 0) {
  2876. DRM_ERROR("No GTT space found for object %d\n",
  2877. reloc->target_handle);
  2878. goto err;
  2879. }
  2880. /* Validate that the target is in a valid r/w GPU domain */
  2881. if (reloc->write_domain & (reloc->write_domain - 1)) {
  2882. DRM_ERROR("reloc with multiple write domains: "
  2883. "obj %p target %d offset %d "
  2884. "read %08x write %08x",
  2885. obj, reloc->target_handle,
  2886. (int) reloc->offset,
  2887. reloc->read_domains,
  2888. reloc->write_domain);
  2889. goto err;
  2890. }
  2891. if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
  2892. reloc->read_domains & I915_GEM_DOMAIN_CPU) {
  2893. DRM_ERROR("reloc with read/write CPU domains: "
  2894. "obj %p target %d offset %d "
  2895. "read %08x write %08x",
  2896. obj, reloc->target_handle,
  2897. (int) reloc->offset,
  2898. reloc->read_domains,
  2899. reloc->write_domain);
  2900. goto err;
  2901. }
  2902. if (reloc->write_domain && target_obj->pending_write_domain &&
  2903. reloc->write_domain != target_obj->pending_write_domain) {
  2904. DRM_ERROR("Write domain conflict: "
  2905. "obj %p target %d offset %d "
  2906. "new %08x old %08x\n",
  2907. obj, reloc->target_handle,
  2908. (int) reloc->offset,
  2909. reloc->write_domain,
  2910. target_obj->pending_write_domain);
  2911. goto err;
  2912. }
  2913. target_obj->pending_read_domains |= reloc->read_domains;
  2914. target_obj->pending_write_domain |= reloc->write_domain;
  2915. /* If the relocation already has the right value in it, no
  2916. * more work needs to be done.
  2917. */
  2918. if (target_offset == reloc->presumed_offset)
  2919. goto out;
  2920. /* Check that the relocation address is valid... */
  2921. if (reloc->offset > obj->base.size - 4) {
  2922. DRM_ERROR("Relocation beyond object bounds: "
  2923. "obj %p target %d offset %d size %d.\n",
  2924. obj, reloc->target_handle,
  2925. (int) reloc->offset,
  2926. (int) obj->base.size);
  2927. goto err;
  2928. }
  2929. if (reloc->offset & 3) {
  2930. DRM_ERROR("Relocation not 4-byte aligned: "
  2931. "obj %p target %d offset %d.\n",
  2932. obj, reloc->target_handle,
  2933. (int) reloc->offset);
  2934. goto err;
  2935. }
  2936. /* and points to somewhere within the target object. */
  2937. if (reloc->delta >= target_obj->size) {
  2938. DRM_ERROR("Relocation beyond target object bounds: "
  2939. "obj %p target %d delta %d size %d.\n",
  2940. obj, reloc->target_handle,
  2941. (int) reloc->delta,
  2942. (int) target_obj->size);
  2943. goto err;
  2944. }
  2945. reloc->delta += target_offset;
  2946. if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
  2947. uint32_t page_offset = reloc->offset & ~PAGE_MASK;
  2948. char *vaddr;
  2949. vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
  2950. *(uint32_t *)(vaddr + page_offset) = reloc->delta;
  2951. kunmap_atomic(vaddr);
  2952. } else {
  2953. struct drm_i915_private *dev_priv = dev->dev_private;
  2954. uint32_t __iomem *reloc_entry;
  2955. void __iomem *reloc_page;
  2956. ret = i915_gem_object_set_to_gtt_domain(obj, 1);
  2957. if (ret)
  2958. goto err;
  2959. /* Map the page containing the relocation we're going to perform. */
  2960. reloc->offset += obj->gtt_offset;
  2961. reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
  2962. reloc->offset & PAGE_MASK);
  2963. reloc_entry = (uint32_t __iomem *)
  2964. (reloc_page + (reloc->offset & ~PAGE_MASK));
  2965. iowrite32(reloc->delta, reloc_entry);
  2966. io_mapping_unmap_atomic(reloc_page);
  2967. }
  2968. /* and update the user's relocation entry */
  2969. reloc->presumed_offset = target_offset;
  2970. out:
  2971. ret = 0;
  2972. err:
  2973. drm_gem_object_unreference(target_obj);
  2974. return ret;
  2975. }
  2976. static int
  2977. i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
  2978. struct drm_file *file_priv,
  2979. struct drm_i915_gem_exec_object2 *entry)
  2980. {
  2981. struct drm_i915_gem_relocation_entry __user *user_relocs;
  2982. int i, ret;
  2983. user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
  2984. for (i = 0; i < entry->relocation_count; i++) {
  2985. struct drm_i915_gem_relocation_entry reloc;
  2986. if (__copy_from_user_inatomic(&reloc,
  2987. user_relocs+i,
  2988. sizeof(reloc)))
  2989. return -EFAULT;
  2990. ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
  2991. if (ret)
  2992. return ret;
  2993. if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
  2994. &reloc.presumed_offset,
  2995. sizeof(reloc.presumed_offset)))
  2996. return -EFAULT;
  2997. }
  2998. return 0;
  2999. }
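/*
 * The __copy_*_inatomic() calls above run with struct_mutex held and
 * return -EFAULT rather than faulting in user pages. The execbuffer path
 * treats that as the cue to fall back to
 * i915_gem_execbuffer_relocate_slow(), which drops the mutex and uses
 * plain copy_from_user() before retrying the relocations.
 */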
  3000. static int
  3001. i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
  3002. struct drm_file *file_priv,
  3003. struct drm_i915_gem_exec_object2 *entry,
  3004. struct drm_i915_gem_relocation_entry *relocs)
  3005. {
  3006. int i, ret;
  3007. for (i = 0; i < entry->relocation_count; i++) {
  3008. ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
  3009. if (ret)
  3010. return ret;
  3011. }
  3012. return 0;
  3013. }
  3014. static int
  3015. i915_gem_execbuffer_relocate(struct drm_device *dev,
  3016. struct drm_file *file,
  3017. struct drm_i915_gem_object **object_list,
  3018. struct drm_i915_gem_exec_object2 *exec_list,
  3019. int count)
  3020. {
  3021. int i, ret;
  3022. for (i = 0; i < count; i++) {
  3023. struct drm_i915_gem_object *obj = object_list[i];
  3024. obj->base.pending_read_domains = 0;
  3025. obj->base.pending_write_domain = 0;
  3026. ret = i915_gem_execbuffer_relocate_object(obj, file,
  3027. &exec_list[i]);
  3028. if (ret)
  3029. return ret;
  3030. }
  3031. return 0;
  3032. }
  3033. static int
  3034. i915_gem_execbuffer_reserve(struct drm_device *dev,
  3035. struct drm_file *file,
  3036. struct drm_i915_gem_object **object_list,
  3037. struct drm_i915_gem_exec_object2 *exec_list,
  3038. int count)
  3039. {
  3040. int ret, i, retry;
  3041. /* Attempt to pin all of the buffers into the GTT.
  3042. * This is done in 3 phases:
  3043. *
  3044. * 1a. Unbind all objects that do not match the GTT constraints for
  3045. * the execbuffer (fenceable, mappable, alignment etc).
  3046. * 1b. Increment pin count for already bound objects.
  3047. * 2. Bind new objects.
  3048. * 3. Decrement pin count.
  3049. *
3050. * This avoids unnecessary unbinding of later objects in order to make
  3051. * room for the earlier objects *unless* we need to defragment.
  3052. */
  3053. retry = 0;
  3054. do {
  3055. ret = 0;
3056. /* Unbind any ill-fitting objects, or pin those that already fit. */
  3057. for (i = 0; i < count; i++) {
  3058. struct drm_i915_gem_object *obj = object_list[i];
  3059. struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
  3060. bool need_fence, need_mappable;
  3061. if (!obj->gtt_space)
  3062. continue;
  3063. need_fence =
  3064. entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
  3065. obj->tiling_mode != I915_TILING_NONE;
  3066. need_mappable =
  3067. entry->relocation_count ? true : need_fence;
  3068. if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
  3069. (need_mappable && !obj->map_and_fenceable))
  3070. ret = i915_gem_object_unbind(obj);
  3071. else
  3072. ret = i915_gem_object_pin(obj,
  3073. entry->alignment,
  3074. need_mappable);
  3075. if (ret) {
  3076. count = i;
  3077. goto err;
  3078. }
  3079. }
  3080. /* Bind fresh objects */
  3081. for (i = 0; i < count; i++) {
  3082. struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
  3083. struct drm_i915_gem_object *obj = object_list[i];
  3084. bool need_fence;
  3085. need_fence =
  3086. entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
  3087. obj->tiling_mode != I915_TILING_NONE;
  3088. if (!obj->gtt_space) {
  3089. bool need_mappable =
  3090. entry->relocation_count ? true : need_fence;
  3091. ret = i915_gem_object_pin(obj,
  3092. entry->alignment,
  3093. need_mappable);
  3094. if (ret)
  3095. break;
  3096. }
  3097. if (need_fence) {
  3098. ret = i915_gem_object_get_fence_reg(obj, true);
  3099. if (ret)
  3100. break;
  3101. obj->pending_fenced_gpu_access = true;
  3102. }
  3103. entry->offset = obj->gtt_offset;
  3104. }
  3105. err: /* Decrement pin count for bound objects */
  3106. for (i = 0; i < count; i++) {
  3107. struct drm_i915_gem_object *obj = object_list[i];
  3108. if (obj->gtt_space)
  3109. i915_gem_object_unpin(obj);
  3110. }
  3111. if (ret != -ENOSPC || retry > 1)
  3112. return ret;
  3113. /* First attempt, just clear anything that is purgeable.
  3114. * Second attempt, clear the entire GTT.
  3115. */
  3116. ret = i915_gem_evict_everything(dev, retry == 0);
  3117. if (ret)
  3118. return ret;
  3119. retry++;
  3120. } while (1);
  3121. }
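/*
 * The reservation retry ladder above, in summary:
 *
 *	pass 0: try to pin everything; on -ENOSPC evict purgeable objects
 *	pass 1: try again;             on -ENOSPC evict the entire GTT
 *	pass 2: try again;             on failure return the error
 *
 * Any error other than -ENOSPC aborts immediately.
 */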
  3122. static int
  3123. i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
  3124. struct drm_file *file,
  3125. struct drm_i915_gem_object **object_list,
  3126. struct drm_i915_gem_exec_object2 *exec_list,
  3127. int count)
  3128. {
  3129. struct drm_i915_gem_relocation_entry *reloc;
  3130. int i, total, ret;
  3131. for (i = 0; i < count; i++)
  3132. object_list[i]->in_execbuffer = false;
  3133. mutex_unlock(&dev->struct_mutex);
  3134. total = 0;
  3135. for (i = 0; i < count; i++)
  3136. total += exec_list[i].relocation_count;
  3137. reloc = drm_malloc_ab(total, sizeof(*reloc));
  3138. if (reloc == NULL) {
  3139. mutex_lock(&dev->struct_mutex);
  3140. return -ENOMEM;
  3141. }
  3142. total = 0;
  3143. for (i = 0; i < count; i++) {
  3144. struct drm_i915_gem_relocation_entry __user *user_relocs;
  3145. user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
  3146. if (copy_from_user(reloc+total, user_relocs,
  3147. exec_list[i].relocation_count *
  3148. sizeof(*reloc))) {
  3149. ret = -EFAULT;
  3150. mutex_lock(&dev->struct_mutex);
  3151. goto err;
  3152. }
  3153. total += exec_list[i].relocation_count;
  3154. }
  3155. ret = i915_mutex_lock_interruptible(dev);
  3156. if (ret) {
  3157. mutex_lock(&dev->struct_mutex);
  3158. goto err;
  3159. }
  3160. ret = i915_gem_execbuffer_reserve(dev, file,
  3161. object_list, exec_list,
  3162. count);
  3163. if (ret)
  3164. goto err;
  3165. total = 0;
  3166. for (i = 0; i < count; i++) {
  3167. struct drm_i915_gem_object *obj = object_list[i];
  3168. obj->base.pending_read_domains = 0;
  3169. obj->base.pending_write_domain = 0;
  3170. ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
  3171. &exec_list[i],
  3172. reloc + total);
  3173. if (ret)
  3174. goto err;
  3175. total += exec_list[i].relocation_count;
  3176. }
3177. /* Leave the user relocations as they are; this is the painfully slow path,
  3178. * and we want to avoid the complication of dropping the lock whilst
  3179. * having buffers reserved in the aperture and so causing spurious
  3180. * ENOSPC for random operations.
  3181. */
  3182. err:
  3183. drm_free_large(reloc);
  3184. return ret;
  3185. }
  3186. static int
  3187. i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
  3188. struct drm_file *file,
  3189. struct intel_ring_buffer *ring,
  3190. struct drm_i915_gem_object **objects,
  3191. int count)
  3192. {
  3193. struct change_domains cd;
  3194. int ret, i;
  3195. cd.invalidate_domains = 0;
  3196. cd.flush_domains = 0;
  3197. cd.flush_rings = 0;
  3198. for (i = 0; i < count; i++)
  3199. i915_gem_object_set_to_gpu_domain(objects[i], ring, &cd);
  3200. if (cd.invalidate_domains | cd.flush_domains) {
  3201. #if WATCH_EXEC
  3202. DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
  3203. __func__,
  3204. cd.invalidate_domains,
  3205. cd.flush_domains);
  3206. #endif
  3207. i915_gem_flush(dev,
  3208. cd.invalidate_domains,
  3209. cd.flush_domains,
  3210. cd.flush_rings);
  3211. }
  3212. for (i = 0; i < count; i++) {
  3213. struct drm_i915_gem_object *obj = objects[i];
  3214. /* XXX replace with semaphores */
  3215. if (obj->ring && ring != obj->ring) {
  3216. ret = i915_gem_object_wait_rendering(obj, true);
  3217. if (ret)
  3218. return ret;
  3219. }
  3220. }
  3221. return 0;
  3222. }
  3223. /* Throttle our rendering by waiting until the ring has completed our requests
  3224. * emitted over 20 msec ago.
  3225. *
  3226. * Note that if we were to use the current jiffies each time around the loop,
  3227. * we wouldn't escape the function with any frames outstanding if the time to
  3228. * render a frame was over 20ms.
  3229. *
  3230. * This should get us reasonable parallelism between CPU and GPU but also
  3231. * relatively low latency when blocking on a particular request to finish.
  3232. */
  3233. static int
  3234. i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
  3235. {
  3236. struct drm_i915_private *dev_priv = dev->dev_private;
  3237. struct drm_i915_file_private *file_priv = file->driver_priv;
  3238. unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
  3239. struct drm_i915_gem_request *request;
  3240. struct intel_ring_buffer *ring = NULL;
  3241. u32 seqno = 0;
  3242. int ret;
  3243. spin_lock(&file_priv->mm.lock);
  3244. list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
  3245. if (time_after_eq(request->emitted_jiffies, recent_enough))
  3246. break;
  3247. ring = request->ring;
  3248. seqno = request->seqno;
  3249. }
  3250. spin_unlock(&file_priv->mm.lock);
  3251. if (seqno == 0)
  3252. return 0;
  3253. ret = 0;
  3254. if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
  3255. /* And wait for the seqno passing without holding any locks and
  3256. * causing extra latency for others. This is safe as the irq
  3257. * generation is designed to be run atomically and so is
  3258. * lockless.
  3259. */
  3260. ring->user_irq_get(ring);
  3261. ret = wait_event_interruptible(ring->irq_queue,
  3262. i915_seqno_passed(ring->get_seqno(ring), seqno)
  3263. || atomic_read(&dev_priv->mm.wedged));
  3264. ring->user_irq_put(ring);
  3265. if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
  3266. ret = -EIO;
  3267. }
  3268. if (ret == 0)
  3269. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
  3270. return ret;
  3271. }
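/*
 * Worked example (assuming HZ=1000 so msecs_to_jiffies(20) == 20):
 * recent_enough = jiffies - 20. A request emitted 35 ticks ago falls
 * outside the window, so its ring/seqno are recorded and waited upon; a
 * request emitted 5 ticks ago ends the scan and is left pending. The net
 * effect is roughly that a client keeps at most ~20ms of unfinished
 * rendering queued ahead of the GPU.
 */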
  3272. static int
  3273. i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
  3274. uint64_t exec_offset)
  3275. {
  3276. uint32_t exec_start, exec_len;
  3277. exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
  3278. exec_len = (uint32_t) exec->batch_len;
  3279. if ((exec_start | exec_len) & 0x7)
  3280. return -EINVAL;
  3281. if (!exec_start)
  3282. return -EINVAL;
  3283. return 0;
  3284. }
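/*
 * Example: a batch bound at GTT offset 0x10000 with batch_start_offset
 * 0x40 yields exec_start 0x10040, which passes the 8-byte alignment test;
 * a start offset of 0x44 (or a batch_len of 0x1c) fails it, and an
 * exec_start of 0 is rejected outright.
 */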
  3285. static int
  3286. validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
  3287. int count)
  3288. {
  3289. int i;
  3290. for (i = 0; i < count; i++) {
  3291. char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
  3292. int length; /* limited by fault_in_pages_readable() */
  3293. /* First check for malicious input causing overflow */
  3294. if (exec[i].relocation_count >
  3295. INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
  3296. return -EINVAL;
  3297. length = exec[i].relocation_count *
  3298. sizeof(struct drm_i915_gem_relocation_entry);
  3299. if (!access_ok(VERIFY_READ, ptr, length))
  3300. return -EFAULT;
  3301. /* we may also need to update the presumed offsets */
  3302. if (!access_ok(VERIFY_WRITE, ptr, length))
  3303. return -EFAULT;
  3304. if (fault_in_pages_readable(ptr, length))
  3305. return -EFAULT;
  3306. }
  3307. return 0;
  3308. }
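/*
 * The INT_MAX / sizeof() guard above matters because "length" is a plain
 * int: a hostile relocation_count of 0x10000001 multiplied by a 32-byte
 * relocation entry would wrap around to 32, and the access_ok() /
 * fault_in_pages_readable() checks would then cover only the first entry.
 * Rejecting oversized counts before the multiplication closes that hole.
 */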
  3309. static int
  3310. i915_gem_do_execbuffer(struct drm_device *dev, void *data,
  3311. struct drm_file *file,
  3312. struct drm_i915_gem_execbuffer2 *args,
  3313. struct drm_i915_gem_exec_object2 *exec_list)
  3314. {
  3315. drm_i915_private_t *dev_priv = dev->dev_private;
  3316. struct drm_i915_gem_object **object_list = NULL;
  3317. struct drm_i915_gem_object *batch_obj;
  3318. struct drm_clip_rect *cliprects = NULL;
  3319. struct drm_i915_gem_request *request = NULL;
  3320. int ret, i, flips;
  3321. uint64_t exec_offset;
  3322. struct intel_ring_buffer *ring = NULL;
  3323. ret = i915_gem_check_is_wedged(dev);
  3324. if (ret)
  3325. return ret;
  3326. ret = validate_exec_list(exec_list, args->buffer_count);
  3327. if (ret)
  3328. return ret;
  3329. #if WATCH_EXEC
  3330. DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
  3331. (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  3332. #endif
  3333. switch (args->flags & I915_EXEC_RING_MASK) {
  3334. case I915_EXEC_DEFAULT:
  3335. case I915_EXEC_RENDER:
  3336. ring = &dev_priv->render_ring;
  3337. break;
  3338. case I915_EXEC_BSD:
  3339. if (!HAS_BSD(dev)) {
  3340. DRM_ERROR("execbuf with invalid ring (BSD)\n");
  3341. return -EINVAL;
  3342. }
  3343. ring = &dev_priv->bsd_ring;
  3344. break;
  3345. case I915_EXEC_BLT:
  3346. if (!HAS_BLT(dev)) {
  3347. DRM_ERROR("execbuf with invalid ring (BLT)\n");
  3348. return -EINVAL;
  3349. }
  3350. ring = &dev_priv->blt_ring;
  3351. break;
  3352. default:
  3353. DRM_ERROR("execbuf with unknown ring: %d\n",
  3354. (int)(args->flags & I915_EXEC_RING_MASK));
  3355. return -EINVAL;
  3356. }
  3357. if (args->buffer_count < 1) {
  3358. DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
  3359. return -EINVAL;
  3360. }
  3361. object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
  3362. if (object_list == NULL) {
  3363. DRM_ERROR("Failed to allocate object list for %d buffers\n",
  3364. args->buffer_count);
  3365. ret = -ENOMEM;
  3366. goto pre_mutex_err;
  3367. }
  3368. if (args->num_cliprects != 0) {
  3369. cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
  3370. GFP_KERNEL);
  3371. if (cliprects == NULL) {
  3372. ret = -ENOMEM;
  3373. goto pre_mutex_err;
  3374. }
  3375. ret = copy_from_user(cliprects,
  3376. (struct drm_clip_rect __user *)
  3377. (uintptr_t) args->cliprects_ptr,
  3378. sizeof(*cliprects) * args->num_cliprects);
  3379. if (ret != 0) {
  3380. DRM_ERROR("copy %d cliprects failed: %d\n",
  3381. args->num_cliprects, ret);
  3382. ret = -EFAULT;
  3383. goto pre_mutex_err;
  3384. }
  3385. }
  3386. request = kzalloc(sizeof(*request), GFP_KERNEL);
  3387. if (request == NULL) {
  3388. ret = -ENOMEM;
  3389. goto pre_mutex_err;
  3390. }
  3391. ret = i915_mutex_lock_interruptible(dev);
  3392. if (ret)
  3393. goto pre_mutex_err;
  3394. if (dev_priv->mm.suspended) {
  3395. mutex_unlock(&dev->struct_mutex);
  3396. ret = -EBUSY;
  3397. goto pre_mutex_err;
  3398. }
  3399. /* Look up object handles */
  3400. for (i = 0; i < args->buffer_count; i++) {
  3401. struct drm_i915_gem_object *obj;
3402. obj = to_intel_bo(drm_gem_object_lookup(dev, file,
  3403. exec_list[i].handle));
  3404. if (obj == NULL) {
  3405. DRM_ERROR("Invalid object handle %d at index %d\n",
  3406. exec_list[i].handle, i);
  3407. /* prevent error path from reading uninitialized data */
  3408. args->buffer_count = i;
  3409. ret = -ENOENT;
  3410. goto err;
  3411. }
  3412. object_list[i] = obj;
  3413. if (obj->in_execbuffer) {
  3414. DRM_ERROR("Object %p appears more than once in object list\n",
  3415. obj);
  3416. /* prevent error path from reading uninitialized data */
  3417. args->buffer_count = i + 1;
  3418. ret = -EINVAL;
  3419. goto err;
  3420. }
  3421. obj->in_execbuffer = true;
  3422. obj->pending_fenced_gpu_access = false;
  3423. }
  3424. /* Move the objects en-masse into the GTT, evicting if necessary. */
  3425. ret = i915_gem_execbuffer_reserve(dev, file,
  3426. object_list, exec_list,
  3427. args->buffer_count);
  3428. if (ret)
  3429. goto err;
  3430. /* The objects are in their final locations, apply the relocations. */
  3431. ret = i915_gem_execbuffer_relocate(dev, file,
  3432. object_list, exec_list,
  3433. args->buffer_count);
  3434. if (ret) {
  3435. if (ret == -EFAULT) {
  3436. ret = i915_gem_execbuffer_relocate_slow(dev, file,
  3437. object_list,
  3438. exec_list,
  3439. args->buffer_count);
  3440. BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  3441. }
  3442. if (ret)
  3443. goto err;
  3444. }
  3445. /* Set the pending read domains for the batch buffer to COMMAND */
  3446. batch_obj = object_list[args->buffer_count-1];
  3447. if (batch_obj->base.pending_write_domain) {
  3448. DRM_ERROR("Attempting to use self-modifying batch buffer\n");
  3449. ret = -EINVAL;
  3450. goto err;
  3451. }
  3452. batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
  3453. /* Sanity check the batch buffer */
  3454. exec_offset = batch_obj->gtt_offset;
  3455. ret = i915_gem_check_execbuffer(args, exec_offset);
  3456. if (ret != 0) {
  3457. DRM_ERROR("execbuf with invalid offset/length\n");
  3458. goto err;
  3459. }
  3460. ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
  3461. object_list, args->buffer_count);
  3462. if (ret)
  3463. goto err;
  3464. #if WATCH_COHERENCY
  3465. for (i = 0; i < args->buffer_count; i++) {
  3466. i915_gem_object_check_coherency(object_list[i],
  3467. exec_list[i].handle);
  3468. }
  3469. #endif
  3470. #if WATCH_EXEC
  3471. i915_gem_dump_object(batch_obj,
  3472. args->batch_len,
  3473. __func__,
  3474. ~0);
  3475. #endif
  3476. /* Check for any pending flips. As we only maintain a flip queue depth
  3477. * of 1, we can simply insert a WAIT for the next display flip prior
  3478. * to executing the batch and avoid stalling the CPU.
  3479. */
  3480. flips = 0;
  3481. for (i = 0; i < args->buffer_count; i++) {
  3482. if (object_list[i]->base.write_domain)
  3483. flips |= atomic_read(&object_list[i]->pending_flip);
  3484. }
  3485. if (flips) {
  3486. int plane, flip_mask;
  3487. for (plane = 0; flips >> plane; plane++) {
  3488. if (((flips >> plane) & 1) == 0)
  3489. continue;
  3490. if (plane)
  3491. flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
  3492. else
  3493. flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
  3494. ret = intel_ring_begin(ring, 2);
  3495. if (ret)
  3496. goto err;
  3497. intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
  3498. intel_ring_emit(ring, MI_NOOP);
  3499. intel_ring_advance(ring);
  3500. }
  3501. }
  3502. /* Exec the batchbuffer */
  3503. ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset);
  3504. if (ret) {
  3505. DRM_ERROR("dispatch failed %d\n", ret);
  3506. goto err;
  3507. }
  3508. for (i = 0; i < args->buffer_count; i++) {
  3509. struct drm_i915_gem_object *obj = object_list[i];
  3510. obj->base.read_domains = obj->base.pending_read_domains;
  3511. obj->base.write_domain = obj->base.pending_write_domain;
  3512. obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
  3513. i915_gem_object_move_to_active(obj, ring);
  3514. if (obj->base.write_domain) {
  3515. obj->dirty = 1;
  3516. list_move_tail(&obj->gpu_write_list,
  3517. &ring->gpu_write_list);
  3518. intel_mark_busy(dev, obj);
  3519. }
  3520. trace_i915_gem_object_change_domain(obj,
  3521. obj->base.read_domains,
  3522. obj->base.write_domain);
  3523. }
  3524. /*
  3525. * Ensure that the commands in the batch buffer are
  3526. * finished before the interrupt fires
  3527. */
  3528. i915_retire_commands(dev, ring);
  3529. if (i915_add_request(dev, file, request, ring))
  3530. i915_gem_next_request_seqno(dev, ring);
  3531. else
  3532. request = NULL;
  3533. err:
  3534. for (i = 0; i < args->buffer_count; i++) {
  3535. object_list[i]->in_execbuffer = false;
  3536. drm_gem_object_unreference(&object_list[i]->base);
  3537. }
  3538. mutex_unlock(&dev->struct_mutex);
  3539. pre_mutex_err:
  3540. drm_free_large(object_list);
  3541. kfree(cliprects);
  3542. kfree(request);
  3543. return ret;
  3544. }
  3545. /*
  3546. * Legacy execbuffer just creates an exec2 list from the original exec object
  3547. * list array and passes it to the real function.
  3548. */
  3549. int
  3550. i915_gem_execbuffer(struct drm_device *dev, void *data,
  3551. struct drm_file *file)
  3552. {
  3553. struct drm_i915_gem_execbuffer *args = data;
  3554. struct drm_i915_gem_execbuffer2 exec2;
  3555. struct drm_i915_gem_exec_object *exec_list = NULL;
  3556. struct drm_i915_gem_exec_object2 *exec2_list = NULL;
  3557. int ret, i;
  3558. #if WATCH_EXEC
  3559. DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
  3560. (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  3561. #endif
  3562. if (args->buffer_count < 1) {
  3563. DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
  3564. return -EINVAL;
  3565. }
  3566. /* Copy in the exec list from userland */
  3567. exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
  3568. exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
  3569. if (exec_list == NULL || exec2_list == NULL) {
  3570. DRM_ERROR("Failed to allocate exec list for %d buffers\n",
  3571. args->buffer_count);
  3572. drm_free_large(exec_list);
  3573. drm_free_large(exec2_list);
  3574. return -ENOMEM;
  3575. }
  3576. ret = copy_from_user(exec_list,
  3577. (struct drm_i915_relocation_entry __user *)
  3578. (uintptr_t) args->buffers_ptr,
  3579. sizeof(*exec_list) * args->buffer_count);
  3580. if (ret != 0) {
  3581. DRM_ERROR("copy %d exec entries failed %d\n",
  3582. args->buffer_count, ret);
  3583. drm_free_large(exec_list);
  3584. drm_free_large(exec2_list);
  3585. return -EFAULT;
  3586. }
  3587. for (i = 0; i < args->buffer_count; i++) {
  3588. exec2_list[i].handle = exec_list[i].handle;
  3589. exec2_list[i].relocation_count = exec_list[i].relocation_count;
  3590. exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
  3591. exec2_list[i].alignment = exec_list[i].alignment;
  3592. exec2_list[i].offset = exec_list[i].offset;
  3593. if (INTEL_INFO(dev)->gen < 4)
  3594. exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
  3595. else
  3596. exec2_list[i].flags = 0;
  3597. }
  3598. exec2.buffers_ptr = args->buffers_ptr;
  3599. exec2.buffer_count = args->buffer_count;
  3600. exec2.batch_start_offset = args->batch_start_offset;
  3601. exec2.batch_len = args->batch_len;
  3602. exec2.DR1 = args->DR1;
  3603. exec2.DR4 = args->DR4;
  3604. exec2.num_cliprects = args->num_cliprects;
  3605. exec2.cliprects_ptr = args->cliprects_ptr;
  3606. exec2.flags = I915_EXEC_RENDER;
  3607. ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
  3608. if (!ret) {
  3609. /* Copy the new buffer offsets back to the user's exec list. */
  3610. for (i = 0; i < args->buffer_count; i++)
  3611. exec_list[i].offset = exec2_list[i].offset;
  3612. /* ... and back out to userspace */
  3613. ret = copy_to_user((struct drm_i915_relocation_entry __user *)
  3614. (uintptr_t) args->buffers_ptr,
  3615. exec_list,
  3616. sizeof(*exec_list) * args->buffer_count);
  3617. if (ret) {
  3618. ret = -EFAULT;
  3619. DRM_ERROR("failed to copy %d exec entries "
  3620. "back to user (%d)\n",
  3621. args->buffer_count, ret);
  3622. }
  3623. }
  3624. drm_free_large(exec_list);
  3625. drm_free_large(exec2_list);
  3626. return ret;
  3627. }
  3628. int
  3629. i915_gem_execbuffer2(struct drm_device *dev, void *data,
  3630. struct drm_file *file)
  3631. {
  3632. struct drm_i915_gem_execbuffer2 *args = data;
  3633. struct drm_i915_gem_exec_object2 *exec2_list = NULL;
  3634. int ret;
  3635. #if WATCH_EXEC
  3636. DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
  3637. (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  3638. #endif
  3639. if (args->buffer_count < 1) {
  3640. DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
  3641. return -EINVAL;
  3642. }
  3643. exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
  3644. if (exec2_list == NULL) {
  3645. DRM_ERROR("Failed to allocate exec list for %d buffers\n",
  3646. args->buffer_count);
  3647. return -ENOMEM;
  3648. }
  3649. ret = copy_from_user(exec2_list,
  3650. (struct drm_i915_relocation_entry __user *)
  3651. (uintptr_t) args->buffers_ptr,
  3652. sizeof(*exec2_list) * args->buffer_count);
  3653. if (ret != 0) {
  3654. DRM_ERROR("copy %d exec entries failed %d\n",
  3655. args->buffer_count, ret);
  3656. drm_free_large(exec2_list);
  3657. return -EFAULT;
  3658. }
  3659. ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
  3660. if (!ret) {
  3661. /* Copy the new buffer offsets back to the user's exec list. */
  3662. ret = copy_to_user((struct drm_i915_relocation_entry __user *)
  3663. (uintptr_t) args->buffers_ptr,
  3664. exec2_list,
  3665. sizeof(*exec2_list) * args->buffer_count);
  3666. if (ret) {
  3667. ret = -EFAULT;
  3668. DRM_ERROR("failed to copy %d exec entries "
  3669. "back to user (%d)\n",
  3670. args->buffer_count, ret);
  3671. }
  3672. }
  3673. drm_free_large(exec2_list);
  3674. return ret;
  3675. }
  3676. int
  3677. i915_gem_object_pin(struct drm_i915_gem_object *obj,
  3678. uint32_t alignment,
  3679. bool map_and_fenceable)
  3680. {
  3681. struct drm_device *dev = obj->base.dev;
  3682. struct drm_i915_private *dev_priv = dev->dev_private;
  3683. int ret;
  3684. BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
  3685. WARN_ON(i915_verify_lists(dev));
  3686. if (obj->gtt_space != NULL) {
  3687. if ((alignment && obj->gtt_offset & (alignment - 1)) ||
  3688. (map_and_fenceable && !obj->map_and_fenceable)) {
  3689. WARN(obj->pin_count,
  3690. "bo is already pinned with incorrect alignment:"
  3691. " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
  3692. " obj->map_and_fenceable=%d\n",
  3693. obj->gtt_offset, alignment,
  3694. map_and_fenceable,
  3695. obj->map_and_fenceable);
  3696. ret = i915_gem_object_unbind(obj);
  3697. if (ret)
  3698. return ret;
  3699. }
  3700. }
  3701. if (obj->gtt_space == NULL) {
  3702. ret = i915_gem_object_bind_to_gtt(obj, alignment,
  3703. map_and_fenceable);
  3704. if (ret)
  3705. return ret;
  3706. }
  3707. if (obj->pin_count++ == 0) {
  3708. i915_gem_info_add_pin(dev_priv, obj, map_and_fenceable);
  3709. if (!obj->active)
  3710. list_move_tail(&obj->mm_list,
  3711. &dev_priv->mm.pinned_list);
  3712. }
  3713. BUG_ON(!obj->pin_mappable && map_and_fenceable);
  3714. WARN_ON(i915_verify_lists(dev));
  3715. return 0;
  3716. }
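/*
 * Pin counts nest, so every successful call must be balanced by
 * i915_gem_object_unpin(). A minimal sketch (error handling elided):
 *
 *	ret = i915_gem_object_pin(obj, 4096, true);
 *	if (ret == 0) {
 *		... use obj->gtt_offset ...
 *		i915_gem_object_unpin(obj);
 *	}
 */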
  3717. void
  3718. i915_gem_object_unpin(struct drm_i915_gem_object *obj)
  3719. {
  3720. struct drm_device *dev = obj->base.dev;
  3721. drm_i915_private_t *dev_priv = dev->dev_private;
  3722. WARN_ON(i915_verify_lists(dev));
  3723. BUG_ON(obj->pin_count == 0);
  3724. BUG_ON(obj->gtt_space == NULL);
  3725. if (--obj->pin_count == 0) {
  3726. if (!obj->active)
  3727. list_move_tail(&obj->mm_list,
  3728. &dev_priv->mm.inactive_list);
  3729. i915_gem_info_remove_pin(dev_priv, obj);
  3730. }
  3731. WARN_ON(i915_verify_lists(dev));
  3732. }
  3733. int
  3734. i915_gem_pin_ioctl(struct drm_device *dev, void *data,
  3735. struct drm_file *file)
  3736. {
  3737. struct drm_i915_gem_pin *args = data;
  3738. struct drm_i915_gem_object *obj;
  3739. int ret;
  3740. ret = i915_mutex_lock_interruptible(dev);
  3741. if (ret)
  3742. return ret;
  3743. obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3744. if (obj == NULL) {
  3745. ret = -ENOENT;
  3746. goto unlock;
  3747. }
  3748. if (obj->madv != I915_MADV_WILLNEED) {
  3749. DRM_ERROR("Attempting to pin a purgeable buffer\n");
  3750. ret = -EINVAL;
  3751. goto out;
  3752. }
  3753. if (obj->pin_filp != NULL && obj->pin_filp != file) {
  3754. DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
  3755. args->handle);
  3756. ret = -EINVAL;
  3757. goto out;
  3758. }
  3759. obj->user_pin_count++;
  3760. obj->pin_filp = file;
  3761. if (obj->user_pin_count == 1) {
  3762. ret = i915_gem_object_pin(obj, args->alignment, true);
  3763. if (ret)
  3764. goto out;
  3765. }
  3766. /* XXX - flush the CPU caches for pinned objects
  3767. * as the X server doesn't manage domains yet
  3768. */
  3769. i915_gem_object_flush_cpu_write_domain(obj);
  3770. args->offset = obj->gtt_offset;
  3771. out:
  3772. drm_gem_object_unreference(&obj->base);
  3773. unlock:
  3774. mutex_unlock(&dev->struct_mutex);
  3775. return ret;
  3776. }
  3777. int
  3778. i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
  3779. struct drm_file *file)
  3780. {
  3781. struct drm_i915_gem_pin *args = data;
  3782. struct drm_i915_gem_object *obj;
  3783. int ret;
  3784. ret = i915_mutex_lock_interruptible(dev);
  3785. if (ret)
  3786. return ret;
  3787. obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3788. if (obj == NULL) {
  3789. ret = -ENOENT;
  3790. goto unlock;
  3791. }
  3792. if (obj->pin_filp != file) {
  3793. DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
  3794. args->handle);
  3795. ret = -EINVAL;
  3796. goto out;
  3797. }
  3798. obj->user_pin_count--;
  3799. if (obj->user_pin_count == 0) {
  3800. obj->pin_filp = NULL;
  3801. i915_gem_object_unpin(obj);
  3802. }
  3803. out:
  3804. drm_gem_object_unreference(&obj->base);
  3805. unlock:
  3806. mutex_unlock(&dev->struct_mutex);
  3807. return ret;
  3808. }
  3809. int
  3810. i915_gem_busy_ioctl(struct drm_device *dev, void *data,
  3811. struct drm_file *file)
  3812. {
  3813. struct drm_i915_gem_busy *args = data;
  3814. struct drm_i915_gem_object *obj;
  3815. int ret;
  3816. ret = i915_mutex_lock_interruptible(dev);
  3817. if (ret)
  3818. return ret;
  3819. obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3820. if (obj == NULL) {
  3821. ret = -ENOENT;
  3822. goto unlock;
  3823. }
  3824. /* Count all active objects as busy, even if they are currently not used
  3825. * by the gpu. Users of this interface expect objects to eventually
  3826. * become non-busy without any further actions, therefore emit any
  3827. * necessary flushes here.
  3828. */
  3829. args->busy = obj->active;
  3830. if (args->busy) {
  3831. /* Unconditionally flush objects, even when the gpu still uses this
  3832. * object. Userspace calling this function indicates that it wants to
  3833. * use this buffer rather sooner than later, so issuing the required
  3834. * flush earlier is beneficial.
  3835. */
  3836. if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
  3837. i915_gem_flush_ring(dev, obj->ring,
  3838. 0, obj->base.write_domain);
  3839. /* Update the active list for the hardware's current position.
  3840. * Otherwise this only updates on a delayed timer or when irqs
  3841. * are actually unmasked, and our working set ends up being
  3842. * larger than required.
  3843. */
  3844. i915_gem_retire_requests_ring(dev, obj->ring);
  3845. args->busy = obj->active;
  3846. }
  3847. drm_gem_object_unreference(&obj->base);
  3848. unlock:
  3849. mutex_unlock(&dev->struct_mutex);
  3850. return ret;
  3851. }
  3852. int
  3853. i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
  3854. struct drm_file *file_priv)
  3855. {
  3856. return i915_gem_ring_throttle(dev, file_priv);
  3857. }
  3858. int
  3859. i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
  3860. struct drm_file *file_priv)
  3861. {
  3862. struct drm_i915_gem_madvise *args = data;
  3863. struct drm_i915_gem_object *obj;
  3864. int ret;
  3865. switch (args->madv) {
  3866. case I915_MADV_DONTNEED:
  3867. case I915_MADV_WILLNEED:
  3868. break;
  3869. default:
  3870. return -EINVAL;
  3871. }
  3872. ret = i915_mutex_lock_interruptible(dev);
  3873. if (ret)
  3874. return ret;
  3875. obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
  3876. if (obj == NULL) {
  3877. ret = -ENOENT;
  3878. goto unlock;
  3879. }
  3880. if (obj->pin_count) {
  3881. ret = -EINVAL;
  3882. goto out;
  3883. }
  3884. if (obj->madv != __I915_MADV_PURGED)
  3885. obj->madv = args->madv;
  3886. /* if the object is no longer bound, discard its backing storage */
  3887. if (i915_gem_object_is_purgeable(obj) &&
  3888. obj->gtt_space == NULL)
  3889. i915_gem_object_truncate(obj);
  3890. args->retained = obj->madv != __I915_MADV_PURGED;
  3891. out:
  3892. drm_gem_object_unreference(&obj->base);
  3893. unlock:
  3894. mutex_unlock(&dev->struct_mutex);
  3895. return ret;
  3896. }
  3897. struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
  3898. size_t size)
  3899. {
  3900. struct drm_i915_private *dev_priv = dev->dev_private;
  3901. struct drm_i915_gem_object *obj;
  3902. obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  3903. if (obj == NULL)
  3904. return NULL;
  3905. if (drm_gem_object_init(dev, &obj->base, size) != 0) {
  3906. kfree(obj);
  3907. return NULL;
  3908. }
  3909. i915_gem_info_add_obj(dev_priv, size);
  3910. obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  3911. obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  3912. obj->agp_type = AGP_USER_MEMORY;
  3913. obj->base.driver_private = NULL;
  3914. obj->fence_reg = I915_FENCE_REG_NONE;
  3915. INIT_LIST_HEAD(&obj->mm_list);
  3916. INIT_LIST_HEAD(&obj->gtt_list);
  3917. INIT_LIST_HEAD(&obj->ring_list);
  3918. INIT_LIST_HEAD(&obj->gpu_write_list);
  3919. obj->madv = I915_MADV_WILLNEED;
  3920. /* Avoid an unnecessary call to unbind on the first bind. */
  3921. obj->map_and_fenceable = true;
  3922. return obj;
  3923. }
  3924. int i915_gem_init_object(struct drm_gem_object *obj)
  3925. {
  3926. BUG();
  3927. return 0;
  3928. }
  3929. static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
  3930. {
  3931. struct drm_device *dev = obj->base.dev;
  3932. drm_i915_private_t *dev_priv = dev->dev_private;
  3933. int ret;
  3934. ret = i915_gem_object_unbind(obj);
  3935. if (ret == -ERESTARTSYS) {
  3936. list_move(&obj->mm_list,
  3937. &dev_priv->mm.deferred_free_list);
  3938. return;
  3939. }
  3940. if (obj->base.map_list.map)
  3941. i915_gem_free_mmap_offset(obj);
  3942. drm_gem_object_release(&obj->base);
  3943. i915_gem_info_remove_obj(dev_priv, obj->base.size);
  3944. kfree(obj->page_cpu_valid);
  3945. kfree(obj->bit_17);
  3946. kfree(obj);
  3947. }
  3948. void i915_gem_free_object(struct drm_gem_object *gem_obj)
  3949. {
  3950. struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
  3951. struct drm_device *dev = obj->base.dev;
  3952. trace_i915_gem_object_destroy(obj);
  3953. while (obj->pin_count > 0)
  3954. i915_gem_object_unpin(obj);
  3955. if (obj->phys_obj)
  3956. i915_gem_detach_phys_object(dev, obj);
  3957. i915_gem_free_object_tail(obj);
  3958. }
  3959. int
  3960. i915_gem_idle(struct drm_device *dev)
  3961. {
  3962. drm_i915_private_t *dev_priv = dev->dev_private;
  3963. int ret;
  3964. mutex_lock(&dev->struct_mutex);
  3965. if (dev_priv->mm.suspended) {
  3966. mutex_unlock(&dev->struct_mutex);
  3967. return 0;
  3968. }
  3969. ret = i915_gpu_idle(dev);
  3970. if (ret) {
  3971. mutex_unlock(&dev->struct_mutex);
  3972. return ret;
  3973. }
  3974. /* Under UMS, be paranoid and evict. */
  3975. if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
  3976. ret = i915_gem_evict_inactive(dev, false);
  3977. if (ret) {
  3978. mutex_unlock(&dev->struct_mutex);
  3979. return ret;
  3980. }
  3981. }
  3982. i915_gem_reset_fences(dev);
  3983. /* Hack! Don't let anybody do execbuf while we don't control the chip.
  3984. * We need to replace this with a semaphore, or something.
  3985. * And not confound mm.suspended!
  3986. */
  3987. dev_priv->mm.suspended = 1;
  3988. del_timer_sync(&dev_priv->hangcheck_timer);
  3989. i915_kernel_lost_context(dev);
  3990. i915_gem_cleanup_ringbuffer(dev);
  3991. mutex_unlock(&dev->struct_mutex);
  3992. /* Cancel the retire work handler, which should be idle now. */
  3993. cancel_delayed_work_sync(&dev_priv->mm.retire_work);
  3994. return 0;
  3995. }
  3996. int
  3997. i915_gem_init_ringbuffer(struct drm_device *dev)
  3998. {
  3999. drm_i915_private_t *dev_priv = dev->dev_private;
  4000. int ret;
  4001. ret = intel_init_render_ring_buffer(dev);
  4002. if (ret)
  4003. return ret;
  4004. if (HAS_BSD(dev)) {
  4005. ret = intel_init_bsd_ring_buffer(dev);
  4006. if (ret)
  4007. goto cleanup_render_ring;
  4008. }
  4009. if (HAS_BLT(dev)) {
  4010. ret = intel_init_blt_ring_buffer(dev);
  4011. if (ret)
  4012. goto cleanup_bsd_ring;
  4013. }
  4014. dev_priv->next_seqno = 1;
  4015. return 0;
  4016. cleanup_bsd_ring:
  4017. intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
  4018. cleanup_render_ring:
  4019. intel_cleanup_ring_buffer(&dev_priv->render_ring);
  4020. return ret;
  4021. }
  4022. void
  4023. i915_gem_cleanup_ringbuffer(struct drm_device *dev)
  4024. {
  4025. drm_i915_private_t *dev_priv = dev->dev_private;
  4026. intel_cleanup_ring_buffer(&dev_priv->render_ring);
  4027. intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
  4028. intel_cleanup_ring_buffer(&dev_priv->blt_ring);
  4029. }
  4030. int
  4031. i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
  4032. struct drm_file *file_priv)
  4033. {
  4034. drm_i915_private_t *dev_priv = dev->dev_private;
  4035. int ret;
  4036. if (drm_core_check_feature(dev, DRIVER_MODESET))
  4037. return 0;
  4038. if (atomic_read(&dev_priv->mm.wedged)) {
  4039. DRM_ERROR("Reenabling wedged hardware, good luck\n");
  4040. atomic_set(&dev_priv->mm.wedged, 0);
  4041. }
  4042. mutex_lock(&dev->struct_mutex);
  4043. dev_priv->mm.suspended = 0;
  4044. ret = i915_gem_init_ringbuffer(dev);
  4045. if (ret != 0) {
  4046. mutex_unlock(&dev->struct_mutex);
  4047. return ret;
  4048. }
  4049. BUG_ON(!list_empty(&dev_priv->mm.active_list));
  4050. BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
  4051. BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
  4052. BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
  4053. BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
  4054. BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
  4055. BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
  4056. BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
  4057. BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
  4058. mutex_unlock(&dev->struct_mutex);
  4059. ret = drm_irq_install(dev);
  4060. if (ret)
  4061. goto cleanup_ringbuffer;
  4062. return 0;
  4063. cleanup_ringbuffer:
  4064. mutex_lock(&dev->struct_mutex);
  4065. i915_gem_cleanup_ringbuffer(dev);
  4066. dev_priv->mm.suspended = 1;
  4067. mutex_unlock(&dev->struct_mutex);
  4068. return ret;
  4069. }
  4070. int
  4071. i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
  4072. struct drm_file *file_priv)
  4073. {
  4074. if (drm_core_check_feature(dev, DRIVER_MODESET))
  4075. return 0;
  4076. drm_irq_uninstall(dev);
  4077. return i915_gem_idle(dev);
  4078. }
  4079. void
  4080. i915_gem_lastclose(struct drm_device *dev)
  4081. {
  4082. int ret;
  4083. if (drm_core_check_feature(dev, DRIVER_MODESET))
  4084. return;
  4085. ret = i915_gem_idle(dev);
  4086. if (ret)
  4087. DRM_ERROR("failed to idle hardware: %d\n", ret);
  4088. }
  4089. static void
  4090. init_ring_lists(struct intel_ring_buffer *ring)
  4091. {
  4092. INIT_LIST_HEAD(&ring->active_list);
  4093. INIT_LIST_HEAD(&ring->request_list);
  4094. INIT_LIST_HEAD(&ring->gpu_write_list);
  4095. }
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
	init_ring_lists(&dev_priv->render_ring);
	init_ring_lists(&dev_priv->bsd_ring);
	init_ring_lists(&dev_priv->blt_ring);
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_completion(&dev_priv->error_completion);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) ||
	    IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
		/* fall through: fences 0-7 live at the 830 offsets on gen3 too */
	case 2:
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		break;
	}
	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
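
/*
 * Copy the contents of the physically contiguous backing store back into
 * the object's shmem pages and sever the association.
 */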
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = read_cache_page_gfp(mapping, i,
							GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	intel_gtt_chipset_flush();

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}
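
/*
 * Back the object with the physically contiguous object @id, creating that
 * phys object on first use, and copy the current shmem contents into it.
 */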
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}
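
/*
 * pwrite fast path for objects backed by a phys object: copy user data
 * straight into the contiguous allocation, falling back to a blocking
 * copy_from_user() with struct_mutex dropped if the atomic copy faults.
 */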
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	intel_gtt_chipset_flush();
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
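
/* The GPU is considered active while any object sits on the active or
 * flushing lists.
 */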
static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->mm.active_list);

	return !lists_empty;
}
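
/*
 * Shrinker callback registered in i915_gem_load().  With nr_to_scan == 0 it
 * only reports a weighted count of inactive objects; otherwise it unbinds
 * purgeable buffers first, then anything inactive, idling the GPU as a last
 * resort before rescanning.
 */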
static int
i915_gem_inactive_shrink(struct shrinker *shrinker,
			 int nr_to_scan,
			 gfp_t gfp_mask)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj, *next;
	int cnt;

	if (!mutex_trylock(&dev->struct_mutex))
		return 0;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		cnt = 0;
		list_for_each_entry(obj,
				    &dev_priv->mm.inactive_list,
				    mm_list)
			cnt++;
		mutex_unlock(&dev->struct_mutex);
		return cnt / 100 * sysctl_vfs_cache_pressure;
	}

rescan:
	/* first scan for clean buffers */
	i915_gem_retire_requests(dev);

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (i915_gem_object_is_purgeable(obj)) {
			if (i915_gem_object_unbind(obj) == 0 &&
			    --nr_to_scan == 0)
				break;
		}
	}

	/* second pass, evict/count anything still on the inactive list */
	cnt = 0;
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (nr_to_scan &&
		    i915_gem_object_unbind(obj) == 0)
			nr_to_scan--;
		else
			cnt++;
	}

	if (nr_to_scan && i915_gpu_is_active(dev)) {
		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This has a dramatic impact to reduce the number of
		 * OOM-killer events whilst running the GPU aggressively.
		 */
		if (i915_gpu_idle(dev) == 0)
			goto rescan;
	}

	mutex_unlock(&dev->struct_mutex);
	return cnt / 100 * sysctl_vfs_cache_pressure;
}