i915_gem.c 135 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182418341844185418641874188418941904191419241934194419541964197419841994200420142024203420442054206420742084209421042114212421342144215421642174218421942204221422242234224422542264227422842294230423142324233423442354236423742384239424042414242424342444245424642474248424942504251425242534254425542564257425842594260426142624263426442654266426742684269427042714272427342744275427642774278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157
  1. /*
  2. * Copyright © 2008 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eric Anholt <eric@anholt.net>
  25. *
  26. */
  27. #include "drmP.h"
  28. #include "drm.h"
  29. #include "i915_drm.h"
  30. #include "i915_drv.h"
  31. #include "i915_trace.h"
  32. #include "intel_drv.h"
  33. #include <linux/swap.h>
  34. #include <linux/pci.h>
  35. #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
  36. static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
  37. static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
  38. static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
  39. static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
  40. int write);
  41. static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
  42. uint64_t offset,
  43. uint64_t size);
  44. static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
  45. static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
  46. static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
  47. unsigned alignment);
  48. static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
  49. static int i915_gem_evict_something(struct drm_device *dev, int min_size);
  50. static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
  51. static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
  52. struct drm_i915_gem_pwrite *args,
  53. struct drm_file *file_priv);
  54. static LIST_HEAD(shrink_list);
  55. static DEFINE_SPINLOCK(shrink_list_lock);
  56. int i915_gem_do_init(struct drm_device *dev, unsigned long start,
  57. unsigned long end)
  58. {
  59. drm_i915_private_t *dev_priv = dev->dev_private;
  60. if (start >= end ||
  61. (start & (PAGE_SIZE - 1)) != 0 ||
  62. (end & (PAGE_SIZE - 1)) != 0) {
  63. return -EINVAL;
  64. }
  65. drm_mm_init(&dev_priv->mm.gtt_space, start,
  66. end - start);
  67. dev->gtt_total = (uint32_t) (end - start);
  68. return 0;
  69. }
  70. int
  71. i915_gem_init_ioctl(struct drm_device *dev, void *data,
  72. struct drm_file *file_priv)
  73. {
  74. struct drm_i915_gem_init *args = data;
  75. int ret;
  76. mutex_lock(&dev->struct_mutex);
  77. ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
  78. mutex_unlock(&dev->struct_mutex);
  79. return ret;
  80. }
  81. int
  82. i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
  83. struct drm_file *file_priv)
  84. {
  85. struct drm_i915_gem_get_aperture *args = data;
  86. if (!(dev->driver->driver_features & DRIVER_GEM))
  87. return -ENODEV;
  88. args->aper_size = dev->gtt_total;
  89. args->aper_available_size = (args->aper_size -
  90. atomic_read(&dev->pin_memory));
  91. return 0;
  92. }
  93. /**
  94. * Creates a new mm object and returns a handle to it.
  95. */
  96. int
  97. i915_gem_create_ioctl(struct drm_device *dev, void *data,
  98. struct drm_file *file_priv)
  99. {
  100. struct drm_i915_gem_create *args = data;
  101. struct drm_gem_object *obj;
  102. int ret;
  103. u32 handle;
  104. args->size = roundup(args->size, PAGE_SIZE);
  105. /* Allocate the new object */
  106. obj = drm_gem_object_alloc(dev, args->size);
  107. if (obj == NULL)
  108. return -ENOMEM;
  109. ret = drm_gem_handle_create(file_priv, obj, &handle);
  110. drm_gem_object_handle_unreference_unlocked(obj);
  111. if (ret)
  112. return ret;
  113. args->handle = handle;
  114. return 0;
  115. }
  116. static inline int
  117. fast_shmem_read(struct page **pages,
  118. loff_t page_base, int page_offset,
  119. char __user *data,
  120. int length)
  121. {
  122. char __iomem *vaddr;
  123. int unwritten;
  124. vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
  125. if (vaddr == NULL)
  126. return -ENOMEM;
  127. unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
  128. kunmap_atomic(vaddr, KM_USER0);
  129. if (unwritten)
  130. return -EFAULT;
  131. return 0;
  132. }
  133. static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
  134. {
  135. drm_i915_private_t *dev_priv = obj->dev->dev_private;
  136. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  137. return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
  138. obj_priv->tiling_mode != I915_TILING_NONE;
  139. }
  140. static inline int
  141. slow_shmem_copy(struct page *dst_page,
  142. int dst_offset,
  143. struct page *src_page,
  144. int src_offset,
  145. int length)
  146. {
  147. char *dst_vaddr, *src_vaddr;
  148. dst_vaddr = kmap_atomic(dst_page, KM_USER0);
  149. if (dst_vaddr == NULL)
  150. return -ENOMEM;
  151. src_vaddr = kmap_atomic(src_page, KM_USER1);
  152. if (src_vaddr == NULL) {
  153. kunmap_atomic(dst_vaddr, KM_USER0);
  154. return -ENOMEM;
  155. }
  156. memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
  157. kunmap_atomic(src_vaddr, KM_USER1);
  158. kunmap_atomic(dst_vaddr, KM_USER0);
  159. return 0;
  160. }
  161. static inline int
  162. slow_shmem_bit17_copy(struct page *gpu_page,
  163. int gpu_offset,
  164. struct page *cpu_page,
  165. int cpu_offset,
  166. int length,
  167. int is_read)
  168. {
  169. char *gpu_vaddr, *cpu_vaddr;
  170. /* Use the unswizzled path if this page isn't affected. */
  171. if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
  172. if (is_read)
  173. return slow_shmem_copy(cpu_page, cpu_offset,
  174. gpu_page, gpu_offset, length);
  175. else
  176. return slow_shmem_copy(gpu_page, gpu_offset,
  177. cpu_page, cpu_offset, length);
  178. }
  179. gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
  180. if (gpu_vaddr == NULL)
  181. return -ENOMEM;
  182. cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
  183. if (cpu_vaddr == NULL) {
  184. kunmap_atomic(gpu_vaddr, KM_USER0);
  185. return -ENOMEM;
  186. }
  187. /* Copy the data, XORing A6 with A17 (1). The user already knows he's
  188. * XORing with the other bits (A9 for Y, A9 and A10 for X)
  189. */
  190. while (length > 0) {
  191. int cacheline_end = ALIGN(gpu_offset + 1, 64);
  192. int this_length = min(cacheline_end - gpu_offset, length);
  193. int swizzled_gpu_offset = gpu_offset ^ 64;
  194. if (is_read) {
  195. memcpy(cpu_vaddr + cpu_offset,
  196. gpu_vaddr + swizzled_gpu_offset,
  197. this_length);
  198. } else {
  199. memcpy(gpu_vaddr + swizzled_gpu_offset,
  200. cpu_vaddr + cpu_offset,
  201. this_length);
  202. }
  203. cpu_offset += this_length;
  204. gpu_offset += this_length;
  205. length -= this_length;
  206. }
  207. kunmap_atomic(cpu_vaddr, KM_USER1);
  208. kunmap_atomic(gpu_vaddr, KM_USER0);
  209. return 0;
  210. }
  211. /**
  212. * This is the fast shmem pread path, which attempts to copy_from_user directly
  213. * from the backing pages of the object to the user's address space. On a
  214. * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
  215. */
  216. static int
  217. i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
  218. struct drm_i915_gem_pread *args,
  219. struct drm_file *file_priv)
  220. {
  221. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  222. ssize_t remain;
  223. loff_t offset, page_base;
  224. char __user *user_data;
  225. int page_offset, page_length;
  226. int ret;
  227. user_data = (char __user *) (uintptr_t) args->data_ptr;
  228. remain = args->size;
  229. mutex_lock(&dev->struct_mutex);
  230. ret = i915_gem_object_get_pages(obj, 0);
  231. if (ret != 0)
  232. goto fail_unlock;
  233. ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
  234. args->size);
  235. if (ret != 0)
  236. goto fail_put_pages;
  237. obj_priv = obj->driver_private;
  238. offset = args->offset;
  239. while (remain > 0) {
  240. /* Operation in this page
  241. *
  242. * page_base = page offset within aperture
  243. * page_offset = offset within page
  244. * page_length = bytes to copy for this page
  245. */
  246. page_base = (offset & ~(PAGE_SIZE-1));
  247. page_offset = offset & (PAGE_SIZE-1);
  248. page_length = remain;
  249. if ((page_offset + remain) > PAGE_SIZE)
  250. page_length = PAGE_SIZE - page_offset;
  251. ret = fast_shmem_read(obj_priv->pages,
  252. page_base, page_offset,
  253. user_data, page_length);
  254. if (ret)
  255. goto fail_put_pages;
  256. remain -= page_length;
  257. user_data += page_length;
  258. offset += page_length;
  259. }
  260. fail_put_pages:
  261. i915_gem_object_put_pages(obj);
  262. fail_unlock:
  263. mutex_unlock(&dev->struct_mutex);
  264. return ret;
  265. }
  266. static int
  267. i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
  268. {
  269. int ret;
  270. ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
  271. /* If we've insufficient memory to map in the pages, attempt
  272. * to make some space by throwing out some old buffers.
  273. */
  274. if (ret == -ENOMEM) {
  275. struct drm_device *dev = obj->dev;
  276. ret = i915_gem_evict_something(dev, obj->size);
  277. if (ret)
  278. return ret;
  279. ret = i915_gem_object_get_pages(obj, 0);
  280. }
  281. return ret;
  282. }
  283. /**
  284. * This is the fallback shmem pread path, which allocates temporary storage
  285. * in kernel space to copy_to_user into outside of the struct_mutex, so we
  286. * can copy out of the object's backing pages while holding the struct mutex
  287. * and not take page faults.
  288. */
  289. static int
  290. i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
  291. struct drm_i915_gem_pread *args,
  292. struct drm_file *file_priv)
  293. {
  294. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  295. struct mm_struct *mm = current->mm;
  296. struct page **user_pages;
  297. ssize_t remain;
  298. loff_t offset, pinned_pages, i;
  299. loff_t first_data_page, last_data_page, num_pages;
  300. int shmem_page_index, shmem_page_offset;
  301. int data_page_index, data_page_offset;
  302. int page_length;
  303. int ret;
  304. uint64_t data_ptr = args->data_ptr;
  305. int do_bit17_swizzling;
  306. remain = args->size;
  307. /* Pin the user pages containing the data. We can't fault while
  308. * holding the struct mutex, yet we want to hold it while
  309. * dereferencing the user data.
  310. */
  311. first_data_page = data_ptr / PAGE_SIZE;
  312. last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
  313. num_pages = last_data_page - first_data_page + 1;
  314. user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
  315. if (user_pages == NULL)
  316. return -ENOMEM;
  317. down_read(&mm->mmap_sem);
  318. pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
  319. num_pages, 1, 0, user_pages, NULL);
  320. up_read(&mm->mmap_sem);
  321. if (pinned_pages < num_pages) {
  322. ret = -EFAULT;
  323. goto fail_put_user_pages;
  324. }
  325. do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  326. mutex_lock(&dev->struct_mutex);
  327. ret = i915_gem_object_get_pages_or_evict(obj);
  328. if (ret)
  329. goto fail_unlock;
  330. ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
  331. args->size);
  332. if (ret != 0)
  333. goto fail_put_pages;
  334. obj_priv = obj->driver_private;
  335. offset = args->offset;
  336. while (remain > 0) {
  337. /* Operation in this page
  338. *
  339. * shmem_page_index = page number within shmem file
  340. * shmem_page_offset = offset within page in shmem file
  341. * data_page_index = page number in get_user_pages return
  342. * data_page_offset = offset with data_page_index page.
  343. * page_length = bytes to copy for this page
  344. */
  345. shmem_page_index = offset / PAGE_SIZE;
  346. shmem_page_offset = offset & ~PAGE_MASK;
  347. data_page_index = data_ptr / PAGE_SIZE - first_data_page;
  348. data_page_offset = data_ptr & ~PAGE_MASK;
  349. page_length = remain;
  350. if ((shmem_page_offset + page_length) > PAGE_SIZE)
  351. page_length = PAGE_SIZE - shmem_page_offset;
  352. if ((data_page_offset + page_length) > PAGE_SIZE)
  353. page_length = PAGE_SIZE - data_page_offset;
  354. if (do_bit17_swizzling) {
  355. ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
  356. shmem_page_offset,
  357. user_pages[data_page_index],
  358. data_page_offset,
  359. page_length,
  360. 1);
  361. } else {
  362. ret = slow_shmem_copy(user_pages[data_page_index],
  363. data_page_offset,
  364. obj_priv->pages[shmem_page_index],
  365. shmem_page_offset,
  366. page_length);
  367. }
  368. if (ret)
  369. goto fail_put_pages;
  370. remain -= page_length;
  371. data_ptr += page_length;
  372. offset += page_length;
  373. }
  374. fail_put_pages:
  375. i915_gem_object_put_pages(obj);
  376. fail_unlock:
  377. mutex_unlock(&dev->struct_mutex);
  378. fail_put_user_pages:
  379. for (i = 0; i < pinned_pages; i++) {
  380. SetPageDirty(user_pages[i]);
  381. page_cache_release(user_pages[i]);
  382. }
  383. drm_free_large(user_pages);
  384. return ret;
  385. }
  386. /**
  387. * Reads data from the object referenced by handle.
  388. *
  389. * On error, the contents of *data are undefined.
  390. */
  391. int
  392. i915_gem_pread_ioctl(struct drm_device *dev, void *data,
  393. struct drm_file *file_priv)
  394. {
  395. struct drm_i915_gem_pread *args = data;
  396. struct drm_gem_object *obj;
  397. struct drm_i915_gem_object *obj_priv;
  398. int ret;
  399. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  400. if (obj == NULL)
  401. return -EBADF;
  402. obj_priv = obj->driver_private;
  403. /* Bounds check source.
  404. *
  405. * XXX: This could use review for overflow issues...
  406. */
  407. if (args->offset > obj->size || args->size > obj->size ||
  408. args->offset + args->size > obj->size) {
  409. drm_gem_object_unreference_unlocked(obj);
  410. return -EINVAL;
  411. }
  412. if (i915_gem_object_needs_bit17_swizzle(obj)) {
  413. ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
  414. } else {
  415. ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
  416. if (ret != 0)
  417. ret = i915_gem_shmem_pread_slow(dev, obj, args,
  418. file_priv);
  419. }
  420. drm_gem_object_unreference_unlocked(obj);
  421. return ret;
  422. }
  423. /* This is the fast write path which cannot handle
  424. * page faults in the source data
  425. */
  426. static inline int
  427. fast_user_write(struct io_mapping *mapping,
  428. loff_t page_base, int page_offset,
  429. char __user *user_data,
  430. int length)
  431. {
  432. char *vaddr_atomic;
  433. unsigned long unwritten;
  434. vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
  435. unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
  436. user_data, length);
  437. io_mapping_unmap_atomic(vaddr_atomic);
  438. if (unwritten)
  439. return -EFAULT;
  440. return 0;
  441. }
  442. /* Here's the write path which can sleep for
  443. * page faults
  444. */
  445. static inline int
  446. slow_kernel_write(struct io_mapping *mapping,
  447. loff_t gtt_base, int gtt_offset,
  448. struct page *user_page, int user_offset,
  449. int length)
  450. {
  451. char *src_vaddr, *dst_vaddr;
  452. unsigned long unwritten;
  453. dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
  454. src_vaddr = kmap_atomic(user_page, KM_USER1);
  455. unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
  456. src_vaddr + user_offset,
  457. length);
  458. kunmap_atomic(src_vaddr, KM_USER1);
  459. io_mapping_unmap_atomic(dst_vaddr);
  460. if (unwritten)
  461. return -EFAULT;
  462. return 0;
  463. }
  464. static inline int
  465. fast_shmem_write(struct page **pages,
  466. loff_t page_base, int page_offset,
  467. char __user *data,
  468. int length)
  469. {
  470. char __iomem *vaddr;
  471. unsigned long unwritten;
  472. vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
  473. if (vaddr == NULL)
  474. return -ENOMEM;
  475. unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
  476. kunmap_atomic(vaddr, KM_USER0);
  477. if (unwritten)
  478. return -EFAULT;
  479. return 0;
  480. }
  481. /**
  482. * This is the fast pwrite path, where we copy the data directly from the
  483. * user into the GTT, uncached.
  484. */
  485. static int
  486. i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
  487. struct drm_i915_gem_pwrite *args,
  488. struct drm_file *file_priv)
  489. {
  490. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  491. drm_i915_private_t *dev_priv = dev->dev_private;
  492. ssize_t remain;
  493. loff_t offset, page_base;
  494. char __user *user_data;
  495. int page_offset, page_length;
  496. int ret;
  497. user_data = (char __user *) (uintptr_t) args->data_ptr;
  498. remain = args->size;
  499. if (!access_ok(VERIFY_READ, user_data, remain))
  500. return -EFAULT;
  501. mutex_lock(&dev->struct_mutex);
  502. ret = i915_gem_object_pin(obj, 0);
  503. if (ret) {
  504. mutex_unlock(&dev->struct_mutex);
  505. return ret;
  506. }
  507. ret = i915_gem_object_set_to_gtt_domain(obj, 1);
  508. if (ret)
  509. goto fail;
  510. obj_priv = obj->driver_private;
  511. offset = obj_priv->gtt_offset + args->offset;
  512. while (remain > 0) {
  513. /* Operation in this page
  514. *
  515. * page_base = page offset within aperture
  516. * page_offset = offset within page
  517. * page_length = bytes to copy for this page
  518. */
  519. page_base = (offset & ~(PAGE_SIZE-1));
  520. page_offset = offset & (PAGE_SIZE-1);
  521. page_length = remain;
  522. if ((page_offset + remain) > PAGE_SIZE)
  523. page_length = PAGE_SIZE - page_offset;
  524. ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
  525. page_offset, user_data, page_length);
  526. /* If we get a fault while copying data, then (presumably) our
  527. * source page isn't available. Return the error and we'll
  528. * retry in the slow path.
  529. */
  530. if (ret)
  531. goto fail;
  532. remain -= page_length;
  533. user_data += page_length;
  534. offset += page_length;
  535. }
  536. fail:
  537. i915_gem_object_unpin(obj);
  538. mutex_unlock(&dev->struct_mutex);
  539. return ret;
  540. }
  541. /**
  542. * This is the fallback GTT pwrite path, which uses get_user_pages to pin
  543. * the memory and maps it using kmap_atomic for copying.
  544. *
  545. * This code resulted in x11perf -rgb10text consuming about 10% more CPU
  546. * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
  547. */
  548. static int
  549. i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
  550. struct drm_i915_gem_pwrite *args,
  551. struct drm_file *file_priv)
  552. {
  553. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  554. drm_i915_private_t *dev_priv = dev->dev_private;
  555. ssize_t remain;
  556. loff_t gtt_page_base, offset;
  557. loff_t first_data_page, last_data_page, num_pages;
  558. loff_t pinned_pages, i;
  559. struct page **user_pages;
  560. struct mm_struct *mm = current->mm;
  561. int gtt_page_offset, data_page_offset, data_page_index, page_length;
  562. int ret;
  563. uint64_t data_ptr = args->data_ptr;
  564. remain = args->size;
  565. /* Pin the user pages containing the data. We can't fault while
  566. * holding the struct mutex, and all of the pwrite implementations
  567. * want to hold it while dereferencing the user data.
  568. */
  569. first_data_page = data_ptr / PAGE_SIZE;
  570. last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
  571. num_pages = last_data_page - first_data_page + 1;
  572. user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
  573. if (user_pages == NULL)
  574. return -ENOMEM;
  575. down_read(&mm->mmap_sem);
  576. pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
  577. num_pages, 0, 0, user_pages, NULL);
  578. up_read(&mm->mmap_sem);
  579. if (pinned_pages < num_pages) {
  580. ret = -EFAULT;
  581. goto out_unpin_pages;
  582. }
  583. mutex_lock(&dev->struct_mutex);
  584. ret = i915_gem_object_pin(obj, 0);
  585. if (ret)
  586. goto out_unlock;
  587. ret = i915_gem_object_set_to_gtt_domain(obj, 1);
  588. if (ret)
  589. goto out_unpin_object;
  590. obj_priv = obj->driver_private;
  591. offset = obj_priv->gtt_offset + args->offset;
  592. while (remain > 0) {
  593. /* Operation in this page
  594. *
  595. * gtt_page_base = page offset within aperture
  596. * gtt_page_offset = offset within page in aperture
  597. * data_page_index = page number in get_user_pages return
  598. * data_page_offset = offset with data_page_index page.
  599. * page_length = bytes to copy for this page
  600. */
  601. gtt_page_base = offset & PAGE_MASK;
  602. gtt_page_offset = offset & ~PAGE_MASK;
  603. data_page_index = data_ptr / PAGE_SIZE - first_data_page;
  604. data_page_offset = data_ptr & ~PAGE_MASK;
  605. page_length = remain;
  606. if ((gtt_page_offset + page_length) > PAGE_SIZE)
  607. page_length = PAGE_SIZE - gtt_page_offset;
  608. if ((data_page_offset + page_length) > PAGE_SIZE)
  609. page_length = PAGE_SIZE - data_page_offset;
  610. ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
  611. gtt_page_base, gtt_page_offset,
  612. user_pages[data_page_index],
  613. data_page_offset,
  614. page_length);
  615. /* If we get a fault while copying data, then (presumably) our
  616. * source page isn't available. Return the error and we'll
  617. * retry in the slow path.
  618. */
  619. if (ret)
  620. goto out_unpin_object;
  621. remain -= page_length;
  622. offset += page_length;
  623. data_ptr += page_length;
  624. }
  625. out_unpin_object:
  626. i915_gem_object_unpin(obj);
  627. out_unlock:
  628. mutex_unlock(&dev->struct_mutex);
  629. out_unpin_pages:
  630. for (i = 0; i < pinned_pages; i++)
  631. page_cache_release(user_pages[i]);
  632. drm_free_large(user_pages);
  633. return ret;
  634. }
  635. /**
  636. * This is the fast shmem pwrite path, which attempts to directly
  637. * copy_from_user into the kmapped pages backing the object.
  638. */
  639. static int
  640. i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
  641. struct drm_i915_gem_pwrite *args,
  642. struct drm_file *file_priv)
  643. {
  644. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  645. ssize_t remain;
  646. loff_t offset, page_base;
  647. char __user *user_data;
  648. int page_offset, page_length;
  649. int ret;
  650. user_data = (char __user *) (uintptr_t) args->data_ptr;
  651. remain = args->size;
  652. mutex_lock(&dev->struct_mutex);
  653. ret = i915_gem_object_get_pages(obj, 0);
  654. if (ret != 0)
  655. goto fail_unlock;
  656. ret = i915_gem_object_set_to_cpu_domain(obj, 1);
  657. if (ret != 0)
  658. goto fail_put_pages;
  659. obj_priv = obj->driver_private;
  660. offset = args->offset;
  661. obj_priv->dirty = 1;
  662. while (remain > 0) {
  663. /* Operation in this page
  664. *
  665. * page_base = page offset within aperture
  666. * page_offset = offset within page
  667. * page_length = bytes to copy for this page
  668. */
  669. page_base = (offset & ~(PAGE_SIZE-1));
  670. page_offset = offset & (PAGE_SIZE-1);
  671. page_length = remain;
  672. if ((page_offset + remain) > PAGE_SIZE)
  673. page_length = PAGE_SIZE - page_offset;
  674. ret = fast_shmem_write(obj_priv->pages,
  675. page_base, page_offset,
  676. user_data, page_length);
  677. if (ret)
  678. goto fail_put_pages;
  679. remain -= page_length;
  680. user_data += page_length;
  681. offset += page_length;
  682. }
  683. fail_put_pages:
  684. i915_gem_object_put_pages(obj);
  685. fail_unlock:
  686. mutex_unlock(&dev->struct_mutex);
  687. return ret;
  688. }
  689. /**
  690. * This is the fallback shmem pwrite path, which uses get_user_pages to pin
  691. * the memory and maps it using kmap_atomic for copying.
  692. *
  693. * This avoids taking mmap_sem for faulting on the user's address while the
  694. * struct_mutex is held.
  695. */
  696. static int
  697. i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
  698. struct drm_i915_gem_pwrite *args,
  699. struct drm_file *file_priv)
  700. {
  701. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  702. struct mm_struct *mm = current->mm;
  703. struct page **user_pages;
  704. ssize_t remain;
  705. loff_t offset, pinned_pages, i;
  706. loff_t first_data_page, last_data_page, num_pages;
  707. int shmem_page_index, shmem_page_offset;
  708. int data_page_index, data_page_offset;
  709. int page_length;
  710. int ret;
  711. uint64_t data_ptr = args->data_ptr;
  712. int do_bit17_swizzling;
  713. remain = args->size;
  714. /* Pin the user pages containing the data. We can't fault while
  715. * holding the struct mutex, and all of the pwrite implementations
  716. * want to hold it while dereferencing the user data.
  717. */
  718. first_data_page = data_ptr / PAGE_SIZE;
  719. last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
  720. num_pages = last_data_page - first_data_page + 1;
  721. user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
  722. if (user_pages == NULL)
  723. return -ENOMEM;
  724. down_read(&mm->mmap_sem);
  725. pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
  726. num_pages, 0, 0, user_pages, NULL);
  727. up_read(&mm->mmap_sem);
  728. if (pinned_pages < num_pages) {
  729. ret = -EFAULT;
  730. goto fail_put_user_pages;
  731. }
  732. do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  733. mutex_lock(&dev->struct_mutex);
  734. ret = i915_gem_object_get_pages_or_evict(obj);
  735. if (ret)
  736. goto fail_unlock;
  737. ret = i915_gem_object_set_to_cpu_domain(obj, 1);
  738. if (ret != 0)
  739. goto fail_put_pages;
  740. obj_priv = obj->driver_private;
  741. offset = args->offset;
  742. obj_priv->dirty = 1;
  743. while (remain > 0) {
  744. /* Operation in this page
  745. *
  746. * shmem_page_index = page number within shmem file
  747. * shmem_page_offset = offset within page in shmem file
  748. * data_page_index = page number in get_user_pages return
  749. * data_page_offset = offset with data_page_index page.
  750. * page_length = bytes to copy for this page
  751. */
  752. shmem_page_index = offset / PAGE_SIZE;
  753. shmem_page_offset = offset & ~PAGE_MASK;
  754. data_page_index = data_ptr / PAGE_SIZE - first_data_page;
  755. data_page_offset = data_ptr & ~PAGE_MASK;
  756. page_length = remain;
  757. if ((shmem_page_offset + page_length) > PAGE_SIZE)
  758. page_length = PAGE_SIZE - shmem_page_offset;
  759. if ((data_page_offset + page_length) > PAGE_SIZE)
  760. page_length = PAGE_SIZE - data_page_offset;
  761. if (do_bit17_swizzling) {
  762. ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
  763. shmem_page_offset,
  764. user_pages[data_page_index],
  765. data_page_offset,
  766. page_length,
  767. 0);
  768. } else {
  769. ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
  770. shmem_page_offset,
  771. user_pages[data_page_index],
  772. data_page_offset,
  773. page_length);
  774. }
  775. if (ret)
  776. goto fail_put_pages;
  777. remain -= page_length;
  778. data_ptr += page_length;
  779. offset += page_length;
  780. }
  781. fail_put_pages:
  782. i915_gem_object_put_pages(obj);
  783. fail_unlock:
  784. mutex_unlock(&dev->struct_mutex);
  785. fail_put_user_pages:
  786. for (i = 0; i < pinned_pages; i++)
  787. page_cache_release(user_pages[i]);
  788. drm_free_large(user_pages);
  789. return ret;
  790. }
  791. /**
  792. * Writes data to the object referenced by handle.
  793. *
  794. * On error, the contents of the buffer that were to be modified are undefined.
  795. */
  796. int
  797. i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
  798. struct drm_file *file_priv)
  799. {
  800. struct drm_i915_gem_pwrite *args = data;
  801. struct drm_gem_object *obj;
  802. struct drm_i915_gem_object *obj_priv;
  803. int ret = 0;
  804. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  805. if (obj == NULL)
  806. return -EBADF;
  807. obj_priv = obj->driver_private;
  808. /* Bounds check destination.
  809. *
  810. * XXX: This could use review for overflow issues...
  811. */
  812. if (args->offset > obj->size || args->size > obj->size ||
  813. args->offset + args->size > obj->size) {
  814. drm_gem_object_unreference_unlocked(obj);
  815. return -EINVAL;
  816. }
  817. /* We can only do the GTT pwrite on untiled buffers, as otherwise
  818. * it would end up going through the fenced access, and we'll get
  819. * different detiling behavior between reading and writing.
  820. * pread/pwrite currently are reading and writing from the CPU
  821. * perspective, requiring manual detiling by the client.
  822. */
  823. if (obj_priv->phys_obj)
  824. ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
  825. else if (obj_priv->tiling_mode == I915_TILING_NONE &&
  826. dev->gtt_total != 0) {
  827. ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
  828. if (ret == -EFAULT) {
  829. ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
  830. file_priv);
  831. }
  832. } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
  833. ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
  834. } else {
  835. ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
  836. if (ret == -EFAULT) {
  837. ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
  838. file_priv);
  839. }
  840. }
  841. #if WATCH_PWRITE
  842. if (ret)
  843. DRM_INFO("pwrite failed %d\n", ret);
  844. #endif
  845. drm_gem_object_unreference_unlocked(obj);
  846. return ret;
  847. }
  848. /**
  849. * Called when user space prepares to use an object with the CPU, either
  850. * through the mmap ioctl's mapping or a GTT mapping.
  851. */
  852. int
  853. i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
  854. struct drm_file *file_priv)
  855. {
  856. struct drm_i915_private *dev_priv = dev->dev_private;
  857. struct drm_i915_gem_set_domain *args = data;
  858. struct drm_gem_object *obj;
  859. struct drm_i915_gem_object *obj_priv;
  860. uint32_t read_domains = args->read_domains;
  861. uint32_t write_domain = args->write_domain;
  862. int ret;
  863. if (!(dev->driver->driver_features & DRIVER_GEM))
  864. return -ENODEV;
  865. /* Only handle setting domains to types used by the CPU. */
  866. if (write_domain & I915_GEM_GPU_DOMAINS)
  867. return -EINVAL;
  868. if (read_domains & I915_GEM_GPU_DOMAINS)
  869. return -EINVAL;
  870. /* Having something in the write domain implies it's in the read
  871. * domain, and only that read domain. Enforce that in the request.
  872. */
  873. if (write_domain != 0 && read_domains != write_domain)
  874. return -EINVAL;
  875. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  876. if (obj == NULL)
  877. return -EBADF;
  878. obj_priv = obj->driver_private;
  879. mutex_lock(&dev->struct_mutex);
  880. intel_mark_busy(dev, obj);
  881. #if WATCH_BUF
  882. DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
  883. obj, obj->size, read_domains, write_domain);
  884. #endif
  885. if (read_domains & I915_GEM_DOMAIN_GTT) {
  886. ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
  887. /* Update the LRU on the fence for the CPU access that's
  888. * about to occur.
  889. */
  890. if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
  891. list_move_tail(&obj_priv->fence_list,
  892. &dev_priv->mm.fence_list);
  893. }
  894. /* Silently promote "you're not bound, there was nothing to do"
  895. * to success, since the client was just asking us to
  896. * make sure everything was done.
  897. */
  898. if (ret == -EINVAL)
  899. ret = 0;
  900. } else {
  901. ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
  902. }
  903. drm_gem_object_unreference(obj);
  904. mutex_unlock(&dev->struct_mutex);
  905. return ret;
  906. }
  907. /**
  908. * Called when user space has done writes to this buffer
  909. */
  910. int
  911. i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
  912. struct drm_file *file_priv)
  913. {
  914. struct drm_i915_gem_sw_finish *args = data;
  915. struct drm_gem_object *obj;
  916. struct drm_i915_gem_object *obj_priv;
  917. int ret = 0;
  918. if (!(dev->driver->driver_features & DRIVER_GEM))
  919. return -ENODEV;
  920. mutex_lock(&dev->struct_mutex);
  921. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  922. if (obj == NULL) {
  923. mutex_unlock(&dev->struct_mutex);
  924. return -EBADF;
  925. }
  926. #if WATCH_BUF
  927. DRM_INFO("%s: sw_finish %d (%p %zd)\n",
  928. __func__, args->handle, obj, obj->size);
  929. #endif
  930. obj_priv = obj->driver_private;
  931. /* Pinned buffers may be scanout, so flush the cache */
  932. if (obj_priv->pin_count)
  933. i915_gem_object_flush_cpu_write_domain(obj);
  934. drm_gem_object_unreference(obj);
  935. mutex_unlock(&dev->struct_mutex);
  936. return ret;
  937. }
  938. /**
  939. * Maps the contents of an object, returning the address it is mapped
  940. * into.
  941. *
  942. * While the mapping holds a reference on the contents of the object, it doesn't
  943. * imply a ref on the object itself.
  944. */
  945. int
  946. i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
  947. struct drm_file *file_priv)
  948. {
  949. struct drm_i915_gem_mmap *args = data;
  950. struct drm_gem_object *obj;
  951. loff_t offset;
  952. unsigned long addr;
  953. if (!(dev->driver->driver_features & DRIVER_GEM))
  954. return -ENODEV;
  955. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  956. if (obj == NULL)
  957. return -EBADF;
  958. offset = args->offset;
  959. down_write(&current->mm->mmap_sem);
  960. addr = do_mmap(obj->filp, 0, args->size,
  961. PROT_READ | PROT_WRITE, MAP_SHARED,
  962. args->offset);
  963. up_write(&current->mm->mmap_sem);
  964. drm_gem_object_unreference_unlocked(obj);
  965. if (IS_ERR((void *)addr))
  966. return addr;
  967. args->addr_ptr = (uint64_t) addr;
  968. return 0;
  969. }
  970. /**
  971. * i915_gem_fault - fault a page into the GTT
  972. * vma: VMA in question
  973. * vmf: fault info
  974. *
  975. * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
  976. * from userspace. The fault handler takes care of binding the object to
  977. * the GTT (if needed), allocating and programming a fence register (again,
  978. * only if needed based on whether the old reg is still valid or the object
  979. * is tiled) and inserting a new PTE into the faulting process.
  980. *
  981. * Note that the faulting process may involve evicting existing objects
  982. * from the GTT and/or fence registers to make room. So performance may
  983. * suffer if the GTT working set is large or there are few fence registers
  984. * left.
  985. */
  986. int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  987. {
  988. struct drm_gem_object *obj = vma->vm_private_data;
  989. struct drm_device *dev = obj->dev;
  990. struct drm_i915_private *dev_priv = dev->dev_private;
  991. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  992. pgoff_t page_offset;
  993. unsigned long pfn;
  994. int ret = 0;
  995. bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
  996. /* We don't use vmf->pgoff since that has the fake offset */
  997. page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
  998. PAGE_SHIFT;
  999. /* Now bind it into the GTT if needed */
  1000. mutex_lock(&dev->struct_mutex);
  1001. if (!obj_priv->gtt_space) {
  1002. ret = i915_gem_object_bind_to_gtt(obj, 0);
  1003. if (ret)
  1004. goto unlock;
  1005. list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  1006. ret = i915_gem_object_set_to_gtt_domain(obj, write);
  1007. if (ret)
  1008. goto unlock;
  1009. }
  1010. /* Need a new fence register? */
  1011. if (obj_priv->tiling_mode != I915_TILING_NONE) {
  1012. ret = i915_gem_object_get_fence_reg(obj);
  1013. if (ret)
  1014. goto unlock;
  1015. }
  1016. pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
  1017. page_offset;
  1018. /* Finally, remap it using the new GTT offset */
  1019. ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
  1020. unlock:
  1021. mutex_unlock(&dev->struct_mutex);
  1022. switch (ret) {
  1023. case 0:
  1024. case -ERESTARTSYS:
  1025. return VM_FAULT_NOPAGE;
  1026. case -ENOMEM:
  1027. case -EAGAIN:
  1028. return VM_FAULT_OOM;
  1029. default:
  1030. return VM_FAULT_SIGBUS;
  1031. }
  1032. }
  1033. /**
  1034. * i915_gem_create_mmap_offset - create a fake mmap offset for an object
  1035. * @obj: obj in question
  1036. *
  1037. * GEM memory mapping works by handing back to userspace a fake mmap offset
  1038. * it can use in a subsequent mmap(2) call. The DRM core code then looks
  1039. * up the object based on the offset and sets up the various memory mapping
  1040. * structures.
  1041. *
  1042. * This routine allocates and attaches a fake offset for @obj.
  1043. */
  1044. static int
  1045. i915_gem_create_mmap_offset(struct drm_gem_object *obj)
  1046. {
  1047. struct drm_device *dev = obj->dev;
  1048. struct drm_gem_mm *mm = dev->mm_private;
  1049. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1050. struct drm_map_list *list;
  1051. struct drm_local_map *map;
  1052. int ret = 0;
  1053. /* Set the object up for mmap'ing */
  1054. list = &obj->map_list;
  1055. list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
  1056. if (!list->map)
  1057. return -ENOMEM;
  1058. map = list->map;
  1059. map->type = _DRM_GEM;
  1060. map->size = obj->size;
  1061. map->handle = obj;
  1062. /* Get a DRM GEM mmap offset allocated... */
  1063. list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
  1064. obj->size / PAGE_SIZE, 0, 0);
  1065. if (!list->file_offset_node) {
  1066. DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
  1067. ret = -ENOMEM;
  1068. goto out_free_list;
  1069. }
  1070. list->file_offset_node = drm_mm_get_block(list->file_offset_node,
  1071. obj->size / PAGE_SIZE, 0);
  1072. if (!list->file_offset_node) {
  1073. ret = -ENOMEM;
  1074. goto out_free_list;
  1075. }
  1076. list->hash.key = list->file_offset_node->start;
  1077. if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
  1078. DRM_ERROR("failed to add to map hash\n");
  1079. ret = -ENOMEM;
  1080. goto out_free_mm;
  1081. }
  1082. /* By now we should be all set, any drm_mmap request on the offset
  1083. * below will get to our mmap & fault handler */
  1084. obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
  1085. return 0;
  1086. out_free_mm:
  1087. drm_mm_put_block(list->file_offset_node);
  1088. out_free_list:
  1089. kfree(list->map);
  1090. return ret;
  1091. }
  1092. /**
  1093. * i915_gem_release_mmap - remove physical page mappings
  1094. * @obj: obj in question
  1095. *
  1096. * Preserve the reservation of the mmapping with the DRM core code, but
  1097. * relinquish ownership of the pages back to the system.
  1098. *
  1099. * It is vital that we remove the page mapping if we have mapped a tiled
  1100. * object through the GTT and then lose the fence register due to
  1101. * resource pressure. Similarly if the object has been moved out of the
  1102. * aperture, than pages mapped into userspace must be revoked. Removing the
  1103. * mapping will then trigger a page fault on the next user access, allowing
  1104. * fixup by i915_gem_fault().
  1105. */
  1106. void
  1107. i915_gem_release_mmap(struct drm_gem_object *obj)
  1108. {
  1109. struct drm_device *dev = obj->dev;
  1110. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1111. if (dev->dev_mapping)
  1112. unmap_mapping_range(dev->dev_mapping,
  1113. obj_priv->mmap_offset, obj->size, 1);
  1114. }
  1115. static void
  1116. i915_gem_free_mmap_offset(struct drm_gem_object *obj)
  1117. {
  1118. struct drm_device *dev = obj->dev;
  1119. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1120. struct drm_gem_mm *mm = dev->mm_private;
  1121. struct drm_map_list *list;
  1122. list = &obj->map_list;
  1123. drm_ht_remove_item(&mm->offset_hash, &list->hash);
  1124. if (list->file_offset_node) {
  1125. drm_mm_put_block(list->file_offset_node);
  1126. list->file_offset_node = NULL;
  1127. }
  1128. if (list->map) {
  1129. kfree(list->map);
  1130. list->map = NULL;
  1131. }
  1132. obj_priv->mmap_offset = 0;
  1133. }
  1134. /**
  1135. * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  1136. * @obj: object to check
  1137. *
  1138. * Return the required GTT alignment for an object, taking into account
  1139. * potential fence register mapping if needed.
  1140. */
  1141. static uint32_t
  1142. i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
  1143. {
  1144. struct drm_device *dev = obj->dev;
  1145. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1146. int start, i;
  1147. /*
  1148. * Minimum alignment is 4k (GTT page size), but might be greater
  1149. * if a fence register is needed for the object.
  1150. */
  1151. if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
  1152. return 4096;
  1153. /*
  1154. * Previous chips need to be aligned to the size of the smallest
  1155. * fence register that can contain the object.
  1156. */
  1157. if (IS_I9XX(dev))
  1158. start = 1024*1024;
  1159. else
  1160. start = 512*1024;
  1161. for (i = start; i < obj->size; i <<= 1)
  1162. ;
  1163. return i;
  1164. }
  1165. /**
  1166. * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  1167. * @dev: DRM device
  1168. * @data: GTT mapping ioctl data
  1169. * @file_priv: GEM object info
  1170. *
  1171. * Simply returns the fake offset to userspace so it can mmap it.
  1172. * The mmap call will end up in drm_gem_mmap(), which will set things
  1173. * up so we can get faults in the handler above.
  1174. *
  1175. * The fault handler will take care of binding the object into the GTT
  1176. * (since it may have been evicted to make room for something), allocating
  1177. * a fence register, and mapping the appropriate aperture address into
  1178. * userspace.
  1179. */
  1180. int
  1181. i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
  1182. struct drm_file *file_priv)
  1183. {
  1184. struct drm_i915_gem_mmap_gtt *args = data;
  1185. struct drm_i915_private *dev_priv = dev->dev_private;
  1186. struct drm_gem_object *obj;
  1187. struct drm_i915_gem_object *obj_priv;
  1188. int ret;
  1189. if (!(dev->driver->driver_features & DRIVER_GEM))
  1190. return -ENODEV;
  1191. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  1192. if (obj == NULL)
  1193. return -EBADF;
  1194. mutex_lock(&dev->struct_mutex);
  1195. obj_priv = obj->driver_private;
  1196. if (obj_priv->madv != I915_MADV_WILLNEED) {
  1197. DRM_ERROR("Attempting to mmap a purgeable buffer\n");
  1198. drm_gem_object_unreference(obj);
  1199. mutex_unlock(&dev->struct_mutex);
  1200. return -EINVAL;
  1201. }
  1202. if (!obj_priv->mmap_offset) {
  1203. ret = i915_gem_create_mmap_offset(obj);
  1204. if (ret) {
  1205. drm_gem_object_unreference(obj);
  1206. mutex_unlock(&dev->struct_mutex);
  1207. return ret;
  1208. }
  1209. }
  1210. args->offset = obj_priv->mmap_offset;
  1211. /*
  1212. * Pull it into the GTT so that we have a page list (makes the
  1213. * initial fault faster and any subsequent flushing possible).
  1214. */
  1215. if (!obj_priv->agp_mem) {
  1216. ret = i915_gem_object_bind_to_gtt(obj, 0);
  1217. if (ret) {
  1218. drm_gem_object_unreference(obj);
  1219. mutex_unlock(&dev->struct_mutex);
  1220. return ret;
  1221. }
  1222. list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  1223. }
  1224. drm_gem_object_unreference(obj);
  1225. mutex_unlock(&dev->struct_mutex);
  1226. return 0;
  1227. }
  1228. void
  1229. i915_gem_object_put_pages(struct drm_gem_object *obj)
  1230. {
  1231. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1232. int page_count = obj->size / PAGE_SIZE;
  1233. int i;
  1234. BUG_ON(obj_priv->pages_refcount == 0);
  1235. BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
  1236. if (--obj_priv->pages_refcount != 0)
  1237. return;
  1238. if (obj_priv->tiling_mode != I915_TILING_NONE)
  1239. i915_gem_object_save_bit_17_swizzle(obj);
  1240. if (obj_priv->madv == I915_MADV_DONTNEED)
  1241. obj_priv->dirty = 0;
  1242. for (i = 0; i < page_count; i++) {
  1243. if (obj_priv->pages[i] == NULL)
  1244. break;
  1245. if (obj_priv->dirty)
  1246. set_page_dirty(obj_priv->pages[i]);
  1247. if (obj_priv->madv == I915_MADV_WILLNEED)
  1248. mark_page_accessed(obj_priv->pages[i]);
  1249. page_cache_release(obj_priv->pages[i]);
  1250. }
  1251. obj_priv->dirty = 0;
  1252. drm_free_large(obj_priv->pages);
  1253. obj_priv->pages = NULL;
  1254. }
  1255. static void
  1256. i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
  1257. {
  1258. struct drm_device *dev = obj->dev;
  1259. drm_i915_private_t *dev_priv = dev->dev_private;
  1260. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1261. /* Add a reference if we're newly entering the active list. */
  1262. if (!obj_priv->active) {
  1263. drm_gem_object_reference(obj);
  1264. obj_priv->active = 1;
  1265. }
  1266. /* Move from whatever list we were on to the tail of execution. */
  1267. spin_lock(&dev_priv->mm.active_list_lock);
  1268. list_move_tail(&obj_priv->list,
  1269. &dev_priv->mm.active_list);
  1270. spin_unlock(&dev_priv->mm.active_list_lock);
  1271. obj_priv->last_rendering_seqno = seqno;
  1272. }
  1273. static void
  1274. i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
  1275. {
  1276. struct drm_device *dev = obj->dev;
  1277. drm_i915_private_t *dev_priv = dev->dev_private;
  1278. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1279. BUG_ON(!obj_priv->active);
  1280. list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
  1281. obj_priv->last_rendering_seqno = 0;
  1282. }
  1283. /* Immediately discard the backing storage */
  1284. static void
  1285. i915_gem_object_truncate(struct drm_gem_object *obj)
  1286. {
  1287. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1288. struct inode *inode;
  1289. inode = obj->filp->f_path.dentry->d_inode;
  1290. if (inode->i_op->truncate)
  1291. inode->i_op->truncate (inode);
  1292. obj_priv->madv = __I915_MADV_PURGED;
  1293. }
  1294. static inline int
  1295. i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
  1296. {
  1297. return obj_priv->madv == I915_MADV_DONTNEED;
  1298. }
  1299. static void
  1300. i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
  1301. {
  1302. struct drm_device *dev = obj->dev;
  1303. drm_i915_private_t *dev_priv = dev->dev_private;
  1304. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1305. i915_verify_inactive(dev, __FILE__, __LINE__);
  1306. if (obj_priv->pin_count != 0)
  1307. list_del_init(&obj_priv->list);
  1308. else
  1309. list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  1310. BUG_ON(!list_empty(&obj_priv->gpu_write_list));
  1311. obj_priv->last_rendering_seqno = 0;
  1312. if (obj_priv->active) {
  1313. obj_priv->active = 0;
  1314. drm_gem_object_unreference(obj);
  1315. }
  1316. i915_verify_inactive(dev, __FILE__, __LINE__);
  1317. }
  1318. static void
  1319. i915_gem_process_flushing_list(struct drm_device *dev,
  1320. uint32_t flush_domains, uint32_t seqno)
  1321. {
  1322. drm_i915_private_t *dev_priv = dev->dev_private;
  1323. struct drm_i915_gem_object *obj_priv, *next;
  1324. list_for_each_entry_safe(obj_priv, next,
  1325. &dev_priv->mm.gpu_write_list,
  1326. gpu_write_list) {
  1327. struct drm_gem_object *obj = obj_priv->obj;
  1328. if ((obj->write_domain & flush_domains) ==
  1329. obj->write_domain) {
  1330. uint32_t old_write_domain = obj->write_domain;
  1331. obj->write_domain = 0;
  1332. list_del_init(&obj_priv->gpu_write_list);
  1333. i915_gem_object_move_to_active(obj, seqno);
  1334. /* update the fence lru list */
  1335. if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
  1336. list_move_tail(&obj_priv->fence_list,
  1337. &dev_priv->mm.fence_list);
  1338. trace_i915_gem_object_change_domain(obj,
  1339. obj->read_domains,
  1340. old_write_domain);
  1341. }
  1342. }
  1343. }
  1344. /**
  1345. * Creates a new sequence number, emitting a write of it to the status page
  1346. * plus an interrupt, which will trigger i915_user_interrupt_handler.
  1347. *
  1348. * Must be called with struct_lock held.
  1349. *
  1350. * Returned sequence numbers are nonzero on success.
  1351. */
  1352. uint32_t
  1353. i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  1354. uint32_t flush_domains)
  1355. {
  1356. drm_i915_private_t *dev_priv = dev->dev_private;
  1357. struct drm_i915_file_private *i915_file_priv = NULL;
  1358. struct drm_i915_gem_request *request;
  1359. uint32_t seqno;
  1360. int was_empty;
  1361. RING_LOCALS;
  1362. if (file_priv != NULL)
  1363. i915_file_priv = file_priv->driver_priv;
  1364. request = kzalloc(sizeof(*request), GFP_KERNEL);
  1365. if (request == NULL)
  1366. return 0;
  1367. /* Grab the seqno we're going to make this request be, and bump the
  1368. * next (skipping 0 so it can be the reserved no-seqno value).
  1369. */
  1370. seqno = dev_priv->mm.next_gem_seqno;
  1371. dev_priv->mm.next_gem_seqno++;
  1372. if (dev_priv->mm.next_gem_seqno == 0)
  1373. dev_priv->mm.next_gem_seqno++;
  1374. BEGIN_LP_RING(4);
  1375. OUT_RING(MI_STORE_DWORD_INDEX);
  1376. OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
  1377. OUT_RING(seqno);
  1378. OUT_RING(MI_USER_INTERRUPT);
  1379. ADVANCE_LP_RING();
  1380. DRM_DEBUG_DRIVER("%d\n", seqno);
  1381. request->seqno = seqno;
  1382. request->emitted_jiffies = jiffies;
  1383. was_empty = list_empty(&dev_priv->mm.request_list);
  1384. list_add_tail(&request->list, &dev_priv->mm.request_list);
  1385. if (i915_file_priv) {
  1386. list_add_tail(&request->client_list,
  1387. &i915_file_priv->mm.request_list);
  1388. } else {
  1389. INIT_LIST_HEAD(&request->client_list);
  1390. }
  1391. /* Associate any objects on the flushing list matching the write
  1392. * domain we're flushing with our flush.
  1393. */
  1394. if (flush_domains != 0)
  1395. i915_gem_process_flushing_list(dev, flush_domains, seqno);
  1396. if (!dev_priv->mm.suspended) {
  1397. mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
  1398. if (was_empty)
  1399. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
  1400. }
  1401. return seqno;
  1402. }
  1403. /**
  1404. * Command execution barrier
  1405. *
  1406. * Ensures that all commands in the ring are finished
  1407. * before signalling the CPU
  1408. */
  1409. static uint32_t
  1410. i915_retire_commands(struct drm_device *dev)
  1411. {
  1412. drm_i915_private_t *dev_priv = dev->dev_private;
  1413. uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
  1414. uint32_t flush_domains = 0;
  1415. RING_LOCALS;
  1416. /* The sampler always gets flushed on i965 (sigh) */
  1417. if (IS_I965G(dev))
  1418. flush_domains |= I915_GEM_DOMAIN_SAMPLER;
  1419. BEGIN_LP_RING(2);
  1420. OUT_RING(cmd);
  1421. OUT_RING(0); /* noop */
  1422. ADVANCE_LP_RING();
  1423. return flush_domains;
  1424. }
  1425. /**
  1426. * Moves buffers associated only with the given active seqno from the active
  1427. * to inactive list, potentially freeing them.
  1428. */
  1429. static void
  1430. i915_gem_retire_request(struct drm_device *dev,
  1431. struct drm_i915_gem_request *request)
  1432. {
  1433. drm_i915_private_t *dev_priv = dev->dev_private;
  1434. trace_i915_gem_request_retire(dev, request->seqno);
  1435. /* Move any buffers on the active list that are no longer referenced
  1436. * by the ringbuffer to the flushing/inactive lists as appropriate.
  1437. */
  1438. spin_lock(&dev_priv->mm.active_list_lock);
  1439. while (!list_empty(&dev_priv->mm.active_list)) {
  1440. struct drm_gem_object *obj;
  1441. struct drm_i915_gem_object *obj_priv;
  1442. obj_priv = list_first_entry(&dev_priv->mm.active_list,
  1443. struct drm_i915_gem_object,
  1444. list);
  1445. obj = obj_priv->obj;
  1446. /* If the seqno being retired doesn't match the oldest in the
  1447. * list, then the oldest in the list must still be newer than
  1448. * this seqno.
  1449. */
  1450. if (obj_priv->last_rendering_seqno != request->seqno)
  1451. goto out;
  1452. #if WATCH_LRU
  1453. DRM_INFO("%s: retire %d moves to inactive list %p\n",
  1454. __func__, request->seqno, obj);
  1455. #endif
  1456. if (obj->write_domain != 0)
  1457. i915_gem_object_move_to_flushing(obj);
  1458. else {
  1459. /* Take a reference on the object so it won't be
  1460. * freed while the spinlock is held. The list
  1461. * protection for this spinlock is safe when breaking
  1462. * the lock like this since the next thing we do
  1463. * is just get the head of the list again.
  1464. */
  1465. drm_gem_object_reference(obj);
  1466. i915_gem_object_move_to_inactive(obj);
  1467. spin_unlock(&dev_priv->mm.active_list_lock);
  1468. drm_gem_object_unreference(obj);
  1469. spin_lock(&dev_priv->mm.active_list_lock);
  1470. }
  1471. }
  1472. out:
  1473. spin_unlock(&dev_priv->mm.active_list_lock);
  1474. }
  1475. /**
  1476. * Returns true if seq1 is later than seq2.
  1477. */
  1478. bool
  1479. i915_seqno_passed(uint32_t seq1, uint32_t seq2)
  1480. {
  1481. return (int32_t)(seq1 - seq2) >= 0;
  1482. }
  1483. uint32_t
  1484. i915_get_gem_seqno(struct drm_device *dev)
  1485. {
  1486. drm_i915_private_t *dev_priv = dev->dev_private;
  1487. return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
  1488. }
  1489. /**
  1490. * This function clears the request list as sequence numbers are passed.
  1491. */
  1492. void
  1493. i915_gem_retire_requests(struct drm_device *dev)
  1494. {
  1495. drm_i915_private_t *dev_priv = dev->dev_private;
  1496. uint32_t seqno;
  1497. if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
  1498. return;
  1499. seqno = i915_get_gem_seqno(dev);
  1500. while (!list_empty(&dev_priv->mm.request_list)) {
  1501. struct drm_i915_gem_request *request;
  1502. uint32_t retiring_seqno;
  1503. request = list_first_entry(&dev_priv->mm.request_list,
  1504. struct drm_i915_gem_request,
  1505. list);
  1506. retiring_seqno = request->seqno;
  1507. if (i915_seqno_passed(seqno, retiring_seqno) ||
  1508. atomic_read(&dev_priv->mm.wedged)) {
  1509. i915_gem_retire_request(dev, request);
  1510. list_del(&request->list);
  1511. list_del(&request->client_list);
  1512. kfree(request);
  1513. } else
  1514. break;
  1515. }
  1516. if (unlikely (dev_priv->trace_irq_seqno &&
  1517. i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
  1518. i915_user_irq_put(dev);
  1519. dev_priv->trace_irq_seqno = 0;
  1520. }
  1521. }
  1522. void
  1523. i915_gem_retire_work_handler(struct work_struct *work)
  1524. {
  1525. drm_i915_private_t *dev_priv;
  1526. struct drm_device *dev;
  1527. dev_priv = container_of(work, drm_i915_private_t,
  1528. mm.retire_work.work);
  1529. dev = dev_priv->dev;
  1530. mutex_lock(&dev->struct_mutex);
  1531. i915_gem_retire_requests(dev);
  1532. if (!dev_priv->mm.suspended &&
  1533. !list_empty(&dev_priv->mm.request_list))
  1534. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
  1535. mutex_unlock(&dev->struct_mutex);
  1536. }
  1537. int
  1538. i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
  1539. {
  1540. drm_i915_private_t *dev_priv = dev->dev_private;
  1541. u32 ier;
  1542. int ret = 0;
  1543. BUG_ON(seqno == 0);
  1544. if (atomic_read(&dev_priv->mm.wedged))
  1545. return -EIO;
  1546. if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
  1547. if (HAS_PCH_SPLIT(dev))
  1548. ier = I915_READ(DEIER) | I915_READ(GTIER);
  1549. else
  1550. ier = I915_READ(IER);
  1551. if (!ier) {
  1552. DRM_ERROR("something (likely vbetool) disabled "
  1553. "interrupts, re-enabling\n");
  1554. i915_driver_irq_preinstall(dev);
  1555. i915_driver_irq_postinstall(dev);
  1556. }
  1557. trace_i915_gem_request_wait_begin(dev, seqno);
  1558. dev_priv->mm.waiting_gem_seqno = seqno;
  1559. i915_user_irq_get(dev);
  1560. if (interruptible)
  1561. ret = wait_event_interruptible(dev_priv->irq_queue,
  1562. i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
  1563. atomic_read(&dev_priv->mm.wedged));
  1564. else
  1565. wait_event(dev_priv->irq_queue,
  1566. i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
  1567. atomic_read(&dev_priv->mm.wedged));
  1568. i915_user_irq_put(dev);
  1569. dev_priv->mm.waiting_gem_seqno = 0;
  1570. trace_i915_gem_request_wait_end(dev, seqno);
  1571. }
  1572. if (atomic_read(&dev_priv->mm.wedged))
  1573. ret = -EIO;
  1574. if (ret && ret != -ERESTARTSYS)
  1575. DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
  1576. __func__, ret, seqno, i915_get_gem_seqno(dev));
  1577. /* Directly dispatch request retiring. While we have the work queue
  1578. * to handle this, the waiter on a request often wants an associated
  1579. * buffer to have made it to the inactive list, and we would need
  1580. * a separate wait queue to handle that.
  1581. */
  1582. if (ret == 0)
  1583. i915_gem_retire_requests(dev);
  1584. return ret;
  1585. }
  1586. /**
  1587. * Waits for a sequence number to be signaled, and cleans up the
  1588. * request and object lists appropriately for that event.
  1589. */
  1590. static int
  1591. i915_wait_request(struct drm_device *dev, uint32_t seqno)
  1592. {
  1593. return i915_do_wait_request(dev, seqno, 1);
  1594. }
  1595. static void
  1596. i915_gem_flush(struct drm_device *dev,
  1597. uint32_t invalidate_domains,
  1598. uint32_t flush_domains)
  1599. {
  1600. drm_i915_private_t *dev_priv = dev->dev_private;
  1601. uint32_t cmd;
  1602. RING_LOCALS;
  1603. #if WATCH_EXEC
  1604. DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
  1605. invalidate_domains, flush_domains);
  1606. #endif
  1607. trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
  1608. invalidate_domains, flush_domains);
  1609. if (flush_domains & I915_GEM_DOMAIN_CPU)
  1610. drm_agp_chipset_flush(dev);
  1611. if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
  1612. /*
  1613. * read/write caches:
  1614. *
  1615. * I915_GEM_DOMAIN_RENDER is always invalidated, but is
  1616. * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
  1617. * also flushed at 2d versus 3d pipeline switches.
  1618. *
  1619. * read-only caches:
  1620. *
  1621. * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
  1622. * MI_READ_FLUSH is set, and is always flushed on 965.
  1623. *
  1624. * I915_GEM_DOMAIN_COMMAND may not exist?
  1625. *
  1626. * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
  1627. * invalidated when MI_EXE_FLUSH is set.
  1628. *
  1629. * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
  1630. * invalidated with every MI_FLUSH.
  1631. *
  1632. * TLBs:
  1633. *
  1634. * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
  1635. * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
  1636. * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
  1637. * are flushed at any MI_FLUSH.
  1638. */
  1639. cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
  1640. if ((invalidate_domains|flush_domains) &
  1641. I915_GEM_DOMAIN_RENDER)
  1642. cmd &= ~MI_NO_WRITE_FLUSH;
  1643. if (!IS_I965G(dev)) {
  1644. /*
  1645. * On the 965, the sampler cache always gets flushed
  1646. * and this bit is reserved.
  1647. */
  1648. if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
  1649. cmd |= MI_READ_FLUSH;
  1650. }
  1651. if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
  1652. cmd |= MI_EXE_FLUSH;
  1653. #if WATCH_EXEC
  1654. DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
  1655. #endif
  1656. BEGIN_LP_RING(2);
  1657. OUT_RING(cmd);
  1658. OUT_RING(MI_NOOP);
  1659. ADVANCE_LP_RING();
  1660. }
  1661. }
  1662. /**
  1663. * Ensures that all rendering to the object has completed and the object is
  1664. * safe to unbind from the GTT or access from the CPU.
  1665. */
  1666. static int
  1667. i915_gem_object_wait_rendering(struct drm_gem_object *obj)
  1668. {
  1669. struct drm_device *dev = obj->dev;
  1670. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1671. int ret;
  1672. /* This function only exists to support waiting for existing rendering,
  1673. * not for emitting required flushes.
  1674. */
  1675. BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
  1676. /* If there is rendering queued on the buffer being evicted, wait for
  1677. * it.
  1678. */
  1679. if (obj_priv->active) {
  1680. #if WATCH_BUF
  1681. DRM_INFO("%s: object %p wait for seqno %08x\n",
  1682. __func__, obj, obj_priv->last_rendering_seqno);
  1683. #endif
  1684. ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
  1685. if (ret != 0)
  1686. return ret;
  1687. }
  1688. return 0;
  1689. }
  1690. /**
  1691. * Unbinds an object from the GTT aperture.
  1692. */
  1693. int
  1694. i915_gem_object_unbind(struct drm_gem_object *obj)
  1695. {
  1696. struct drm_device *dev = obj->dev;
  1697. drm_i915_private_t *dev_priv = dev->dev_private;
  1698. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1699. int ret = 0;
  1700. #if WATCH_BUF
  1701. DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
  1702. DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
  1703. #endif
  1704. if (obj_priv->gtt_space == NULL)
  1705. return 0;
  1706. if (obj_priv->pin_count != 0) {
  1707. DRM_ERROR("Attempting to unbind pinned buffer\n");
  1708. return -EINVAL;
  1709. }
  1710. /* blow away mappings if mapped through GTT */
  1711. i915_gem_release_mmap(obj);
  1712. /* Move the object to the CPU domain to ensure that
  1713. * any possible CPU writes while it's not in the GTT
  1714. * are flushed when we go to remap it. This will
  1715. * also ensure that all pending GPU writes are finished
  1716. * before we unbind.
  1717. */
  1718. ret = i915_gem_object_set_to_cpu_domain(obj, 1);
  1719. if (ret) {
  1720. if (ret != -ERESTARTSYS)
  1721. DRM_ERROR("set_domain failed: %d\n", ret);
  1722. return ret;
  1723. }
  1724. BUG_ON(obj_priv->active);
  1725. /* release the fence reg _after_ flushing */
  1726. if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
  1727. i915_gem_clear_fence_reg(obj);
  1728. if (obj_priv->agp_mem != NULL) {
  1729. drm_unbind_agp(obj_priv->agp_mem);
  1730. drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
  1731. obj_priv->agp_mem = NULL;
  1732. }
  1733. i915_gem_object_put_pages(obj);
  1734. BUG_ON(obj_priv->pages_refcount);
  1735. if (obj_priv->gtt_space) {
  1736. atomic_dec(&dev->gtt_count);
  1737. atomic_sub(obj->size, &dev->gtt_memory);
  1738. drm_mm_put_block(obj_priv->gtt_space);
  1739. obj_priv->gtt_space = NULL;
  1740. }
  1741. /* Remove ourselves from the LRU list if present. */
  1742. spin_lock(&dev_priv->mm.active_list_lock);
  1743. if (!list_empty(&obj_priv->list))
  1744. list_del_init(&obj_priv->list);
  1745. spin_unlock(&dev_priv->mm.active_list_lock);
  1746. if (i915_gem_object_is_purgeable(obj_priv))
  1747. i915_gem_object_truncate(obj);
  1748. trace_i915_gem_object_unbind(obj);
  1749. return 0;
  1750. }
  1751. static struct drm_gem_object *
  1752. i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
  1753. {
  1754. drm_i915_private_t *dev_priv = dev->dev_private;
  1755. struct drm_i915_gem_object *obj_priv;
  1756. struct drm_gem_object *best = NULL;
  1757. struct drm_gem_object *first = NULL;
  1758. /* Try to find the smallest clean object */
  1759. list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
  1760. struct drm_gem_object *obj = obj_priv->obj;
  1761. if (obj->size >= min_size) {
  1762. if ((!obj_priv->dirty ||
  1763. i915_gem_object_is_purgeable(obj_priv)) &&
  1764. (!best || obj->size < best->size)) {
  1765. best = obj;
  1766. if (best->size == min_size)
  1767. return best;
  1768. }
  1769. if (!first)
  1770. first = obj;
  1771. }
  1772. }
  1773. return best ? best : first;
  1774. }
  1775. static int
  1776. i915_gpu_idle(struct drm_device *dev)
  1777. {
  1778. drm_i915_private_t *dev_priv = dev->dev_private;
  1779. bool lists_empty;
  1780. uint32_t seqno;
  1781. spin_lock(&dev_priv->mm.active_list_lock);
  1782. lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
  1783. list_empty(&dev_priv->mm.active_list);
  1784. spin_unlock(&dev_priv->mm.active_list_lock);
  1785. if (lists_empty)
  1786. return 0;
  1787. /* Flush everything onto the inactive list. */
  1788. i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
  1789. seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
  1790. if (seqno == 0)
  1791. return -ENOMEM;
  1792. return i915_wait_request(dev, seqno);
  1793. }
  1794. static int
  1795. i915_gem_evict_everything(struct drm_device *dev)
  1796. {
  1797. drm_i915_private_t *dev_priv = dev->dev_private;
  1798. int ret;
  1799. bool lists_empty;
  1800. spin_lock(&dev_priv->mm.active_list_lock);
  1801. lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
  1802. list_empty(&dev_priv->mm.flushing_list) &&
  1803. list_empty(&dev_priv->mm.active_list));
  1804. spin_unlock(&dev_priv->mm.active_list_lock);
  1805. if (lists_empty)
  1806. return -ENOSPC;
  1807. /* Flush everything (on to the inactive lists) and evict */
  1808. ret = i915_gpu_idle(dev);
  1809. if (ret)
  1810. return ret;
  1811. BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
  1812. ret = i915_gem_evict_from_inactive_list(dev);
  1813. if (ret)
  1814. return ret;
  1815. spin_lock(&dev_priv->mm.active_list_lock);
  1816. lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
  1817. list_empty(&dev_priv->mm.flushing_list) &&
  1818. list_empty(&dev_priv->mm.active_list));
  1819. spin_unlock(&dev_priv->mm.active_list_lock);
  1820. BUG_ON(!lists_empty);
  1821. return 0;
  1822. }
  1823. static int
  1824. i915_gem_evict_something(struct drm_device *dev, int min_size)
  1825. {
  1826. drm_i915_private_t *dev_priv = dev->dev_private;
  1827. struct drm_gem_object *obj;
  1828. int ret;
  1829. for (;;) {
  1830. i915_gem_retire_requests(dev);
  1831. /* If there's an inactive buffer available now, grab it
  1832. * and be done.
  1833. */
  1834. obj = i915_gem_find_inactive_object(dev, min_size);
  1835. if (obj) {
  1836. struct drm_i915_gem_object *obj_priv;
  1837. #if WATCH_LRU
  1838. DRM_INFO("%s: evicting %p\n", __func__, obj);
  1839. #endif
  1840. obj_priv = obj->driver_private;
  1841. BUG_ON(obj_priv->pin_count != 0);
  1842. BUG_ON(obj_priv->active);
  1843. /* Wait on the rendering and unbind the buffer. */
  1844. return i915_gem_object_unbind(obj);
  1845. }
  1846. /* If we didn't get anything, but the ring is still processing
  1847. * things, wait for the next to finish and hopefully leave us
  1848. * a buffer to evict.
  1849. */
  1850. if (!list_empty(&dev_priv->mm.request_list)) {
  1851. struct drm_i915_gem_request *request;
  1852. request = list_first_entry(&dev_priv->mm.request_list,
  1853. struct drm_i915_gem_request,
  1854. list);
  1855. ret = i915_wait_request(dev, request->seqno);
  1856. if (ret)
  1857. return ret;
  1858. continue;
  1859. }
  1860. /* If we didn't have anything on the request list but there
  1861. * are buffers awaiting a flush, emit one and try again.
  1862. * When we wait on it, those buffers waiting for that flush
  1863. * will get moved to inactive.
  1864. */
  1865. if (!list_empty(&dev_priv->mm.flushing_list)) {
  1866. struct drm_i915_gem_object *obj_priv;
  1867. /* Find an object that we can immediately reuse */
  1868. list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
  1869. obj = obj_priv->obj;
  1870. if (obj->size >= min_size)
  1871. break;
  1872. obj = NULL;
  1873. }
  1874. if (obj != NULL) {
  1875. uint32_t seqno;
  1876. i915_gem_flush(dev,
  1877. obj->write_domain,
  1878. obj->write_domain);
  1879. seqno = i915_add_request(dev, NULL, obj->write_domain);
  1880. if (seqno == 0)
  1881. return -ENOMEM;
  1882. ret = i915_wait_request(dev, seqno);
  1883. if (ret)
  1884. return ret;
  1885. continue;
  1886. }
  1887. }
  1888. /* If we didn't do any of the above, there's no single buffer
  1889. * large enough to swap out for the new one, so just evict
  1890. * everything and start again. (This should be rare.)
  1891. */
  1892. if (!list_empty (&dev_priv->mm.inactive_list))
  1893. return i915_gem_evict_from_inactive_list(dev);
  1894. else
  1895. return i915_gem_evict_everything(dev);
  1896. }
  1897. }
  1898. int
  1899. i915_gem_object_get_pages(struct drm_gem_object *obj,
  1900. gfp_t gfpmask)
  1901. {
  1902. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1903. int page_count, i;
  1904. struct address_space *mapping;
  1905. struct inode *inode;
  1906. struct page *page;
  1907. int ret;
  1908. if (obj_priv->pages_refcount++ != 0)
  1909. return 0;
  1910. /* Get the list of pages out of our struct file. They'll be pinned
  1911. * at this point until we release them.
  1912. */
  1913. page_count = obj->size / PAGE_SIZE;
  1914. BUG_ON(obj_priv->pages != NULL);
  1915. obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
  1916. if (obj_priv->pages == NULL) {
  1917. obj_priv->pages_refcount--;
  1918. return -ENOMEM;
  1919. }
  1920. inode = obj->filp->f_path.dentry->d_inode;
  1921. mapping = inode->i_mapping;
  1922. for (i = 0; i < page_count; i++) {
  1923. page = read_cache_page_gfp(mapping, i,
  1924. mapping_gfp_mask (mapping) |
  1925. __GFP_COLD |
  1926. gfpmask);
  1927. if (IS_ERR(page)) {
  1928. ret = PTR_ERR(page);
  1929. i915_gem_object_put_pages(obj);
  1930. return ret;
  1931. }
  1932. obj_priv->pages[i] = page;
  1933. }
  1934. if (obj_priv->tiling_mode != I915_TILING_NONE)
  1935. i915_gem_object_do_bit_17_swizzle(obj);
  1936. return 0;
  1937. }
  1938. static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
  1939. {
  1940. struct drm_gem_object *obj = reg->obj;
  1941. struct drm_device *dev = obj->dev;
  1942. drm_i915_private_t *dev_priv = dev->dev_private;
  1943. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1944. int regnum = obj_priv->fence_reg;
  1945. uint64_t val;
  1946. val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
  1947. 0xfffff000) << 32;
  1948. val |= obj_priv->gtt_offset & 0xfffff000;
  1949. val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
  1950. SANDYBRIDGE_FENCE_PITCH_SHIFT;
  1951. if (obj_priv->tiling_mode == I915_TILING_Y)
  1952. val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  1953. val |= I965_FENCE_REG_VALID;
  1954. I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
  1955. }
  1956. static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
  1957. {
  1958. struct drm_gem_object *obj = reg->obj;
  1959. struct drm_device *dev = obj->dev;
  1960. drm_i915_private_t *dev_priv = dev->dev_private;
  1961. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1962. int regnum = obj_priv->fence_reg;
  1963. uint64_t val;
  1964. val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
  1965. 0xfffff000) << 32;
  1966. val |= obj_priv->gtt_offset & 0xfffff000;
  1967. val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
  1968. if (obj_priv->tiling_mode == I915_TILING_Y)
  1969. val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  1970. val |= I965_FENCE_REG_VALID;
  1971. I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
  1972. }
  1973. static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
  1974. {
  1975. struct drm_gem_object *obj = reg->obj;
  1976. struct drm_device *dev = obj->dev;
  1977. drm_i915_private_t *dev_priv = dev->dev_private;
  1978. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1979. int regnum = obj_priv->fence_reg;
  1980. int tile_width;
  1981. uint32_t fence_reg, val;
  1982. uint32_t pitch_val;
  1983. if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
  1984. (obj_priv->gtt_offset & (obj->size - 1))) {
  1985. WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
  1986. __func__, obj_priv->gtt_offset, obj->size);
  1987. return;
  1988. }
  1989. if (obj_priv->tiling_mode == I915_TILING_Y &&
  1990. HAS_128_BYTE_Y_TILING(dev))
  1991. tile_width = 128;
  1992. else
  1993. tile_width = 512;
  1994. /* Note: pitch better be a power of two tile widths */
  1995. pitch_val = obj_priv->stride / tile_width;
  1996. pitch_val = ffs(pitch_val) - 1;
  1997. val = obj_priv->gtt_offset;
  1998. if (obj_priv->tiling_mode == I915_TILING_Y)
  1999. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2000. val |= I915_FENCE_SIZE_BITS(obj->size);
  2001. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2002. val |= I830_FENCE_REG_VALID;
  2003. if (regnum < 8)
  2004. fence_reg = FENCE_REG_830_0 + (regnum * 4);
  2005. else
  2006. fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
  2007. I915_WRITE(fence_reg, val);
  2008. }
  2009. static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
  2010. {
  2011. struct drm_gem_object *obj = reg->obj;
  2012. struct drm_device *dev = obj->dev;
  2013. drm_i915_private_t *dev_priv = dev->dev_private;
  2014. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2015. int regnum = obj_priv->fence_reg;
  2016. uint32_t val;
  2017. uint32_t pitch_val;
  2018. uint32_t fence_size_bits;
  2019. if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
  2020. (obj_priv->gtt_offset & (obj->size - 1))) {
  2021. WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
  2022. __func__, obj_priv->gtt_offset);
  2023. return;
  2024. }
  2025. pitch_val = obj_priv->stride / 128;
  2026. pitch_val = ffs(pitch_val) - 1;
  2027. WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
  2028. val = obj_priv->gtt_offset;
  2029. if (obj_priv->tiling_mode == I915_TILING_Y)
  2030. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2031. fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
  2032. WARN_ON(fence_size_bits & ~0x00000f00);
  2033. val |= fence_size_bits;
  2034. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2035. val |= I830_FENCE_REG_VALID;
  2036. I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
  2037. }
  2038. static int i915_find_fence_reg(struct drm_device *dev)
  2039. {
  2040. struct drm_i915_fence_reg *reg = NULL;
  2041. struct drm_i915_gem_object *obj_priv = NULL;
  2042. struct drm_i915_private *dev_priv = dev->dev_private;
  2043. struct drm_gem_object *obj = NULL;
  2044. int i, avail, ret;
  2045. /* First try to find a free reg */
  2046. avail = 0;
  2047. for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
  2048. reg = &dev_priv->fence_regs[i];
  2049. if (!reg->obj)
  2050. return i;
  2051. obj_priv = reg->obj->driver_private;
  2052. if (!obj_priv->pin_count)
  2053. avail++;
  2054. }
  2055. if (avail == 0)
  2056. return -ENOSPC;
  2057. /* None available, try to steal one or wait for a user to finish */
  2058. i = I915_FENCE_REG_NONE;
  2059. list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
  2060. fence_list) {
  2061. obj = obj_priv->obj;
  2062. if (obj_priv->pin_count)
  2063. continue;
  2064. /* found one! */
  2065. i = obj_priv->fence_reg;
  2066. break;
  2067. }
  2068. BUG_ON(i == I915_FENCE_REG_NONE);
  2069. /* We only have a reference on obj from the active list. put_fence_reg
  2070. * might drop that one, causing a use-after-free in it. So hold a
  2071. * private reference to obj like the other callers of put_fence_reg
  2072. * (set_tiling ioctl) do. */
  2073. drm_gem_object_reference(obj);
  2074. ret = i915_gem_object_put_fence_reg(obj);
  2075. drm_gem_object_unreference(obj);
  2076. if (ret != 0)
  2077. return ret;
  2078. return i;
  2079. }
  2080. /**
  2081. * i915_gem_object_get_fence_reg - set up a fence reg for an object
  2082. * @obj: object to map through a fence reg
  2083. *
  2084. * When mapping objects through the GTT, userspace wants to be able to write
  2085. * to them without having to worry about swizzling if the object is tiled.
  2086. *
  2087. * This function walks the fence regs looking for a free one for @obj,
  2088. * stealing one if it can't find any.
  2089. *
  2090. * It then sets up the reg based on the object's properties: address, pitch
  2091. * and tiling format.
  2092. */
  2093. int
  2094. i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
  2095. {
  2096. struct drm_device *dev = obj->dev;
  2097. struct drm_i915_private *dev_priv = dev->dev_private;
  2098. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2099. struct drm_i915_fence_reg *reg = NULL;
  2100. int ret;
  2101. /* Just update our place in the LRU if our fence is getting used. */
  2102. if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
  2103. list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
  2104. return 0;
  2105. }
  2106. switch (obj_priv->tiling_mode) {
  2107. case I915_TILING_NONE:
  2108. WARN(1, "allocating a fence for non-tiled object?\n");
  2109. break;
  2110. case I915_TILING_X:
  2111. if (!obj_priv->stride)
  2112. return -EINVAL;
  2113. WARN((obj_priv->stride & (512 - 1)),
  2114. "object 0x%08x is X tiled but has non-512B pitch\n",
  2115. obj_priv->gtt_offset);
  2116. break;
  2117. case I915_TILING_Y:
  2118. if (!obj_priv->stride)
  2119. return -EINVAL;
  2120. WARN((obj_priv->stride & (128 - 1)),
  2121. "object 0x%08x is Y tiled but has non-128B pitch\n",
  2122. obj_priv->gtt_offset);
  2123. break;
  2124. }
  2125. ret = i915_find_fence_reg(dev);
  2126. if (ret < 0)
  2127. return ret;
  2128. obj_priv->fence_reg = ret;
  2129. reg = &dev_priv->fence_regs[obj_priv->fence_reg];
  2130. list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
  2131. reg->obj = obj;
  2132. if (IS_GEN6(dev))
  2133. sandybridge_write_fence_reg(reg);
  2134. else if (IS_I965G(dev))
  2135. i965_write_fence_reg(reg);
  2136. else if (IS_I9XX(dev))
  2137. i915_write_fence_reg(reg);
  2138. else
  2139. i830_write_fence_reg(reg);
  2140. trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
  2141. obj_priv->tiling_mode);
  2142. return 0;
  2143. }
  2144. /**
  2145. * i915_gem_clear_fence_reg - clear out fence register info
  2146. * @obj: object to clear
  2147. *
  2148. * Zeroes out the fence register itself and clears out the associated
  2149. * data structures in dev_priv and obj_priv.
  2150. */
  2151. static void
  2152. i915_gem_clear_fence_reg(struct drm_gem_object *obj)
  2153. {
  2154. struct drm_device *dev = obj->dev;
  2155. drm_i915_private_t *dev_priv = dev->dev_private;
  2156. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2157. if (IS_GEN6(dev)) {
  2158. I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
  2159. (obj_priv->fence_reg * 8), 0);
  2160. } else if (IS_I965G(dev)) {
  2161. I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
  2162. } else {
  2163. uint32_t fence_reg;
  2164. if (obj_priv->fence_reg < 8)
  2165. fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
  2166. else
  2167. fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
  2168. 8) * 4;
  2169. I915_WRITE(fence_reg, 0);
  2170. }
  2171. dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
  2172. obj_priv->fence_reg = I915_FENCE_REG_NONE;
  2173. list_del_init(&obj_priv->fence_list);
  2174. }
  2175. /**
  2176. * i915_gem_object_put_fence_reg - waits on outstanding fenced access
  2177. * to the buffer to finish, and then resets the fence register.
  2178. * @obj: tiled object holding a fence register.
  2179. *
  2180. * Zeroes out the fence register itself and clears out the associated
  2181. * data structures in dev_priv and obj_priv.
  2182. */
  2183. int
  2184. i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
  2185. {
  2186. struct drm_device *dev = obj->dev;
  2187. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2188. if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
  2189. return 0;
  2190. /* If we've changed tiling, GTT-mappings of the object
  2191. * need to re-fault to ensure that the correct fence register
  2192. * setup is in place.
  2193. */
  2194. i915_gem_release_mmap(obj);
  2195. /* On the i915, GPU access to tiled buffers is via a fence,
  2196. * therefore we must wait for any outstanding access to complete
  2197. * before clearing the fence.
  2198. */
  2199. if (!IS_I965G(dev)) {
  2200. int ret;
  2201. i915_gem_object_flush_gpu_write_domain(obj);
  2202. ret = i915_gem_object_wait_rendering(obj);
  2203. if (ret != 0)
  2204. return ret;
  2205. }
  2206. i915_gem_object_flush_gtt_write_domain(obj);
  2207. i915_gem_clear_fence_reg (obj);
  2208. return 0;
  2209. }
  2210. /**
  2211. * Finds free space in the GTT aperture and binds the object there.
  2212. */
  2213. static int
  2214. i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
  2215. {
  2216. struct drm_device *dev = obj->dev;
  2217. drm_i915_private_t *dev_priv = dev->dev_private;
  2218. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2219. struct drm_mm_node *free_space;
  2220. gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
  2221. int ret;
  2222. if (obj_priv->madv != I915_MADV_WILLNEED) {
  2223. DRM_ERROR("Attempting to bind a purgeable object\n");
  2224. return -EINVAL;
  2225. }
  2226. if (alignment == 0)
  2227. alignment = i915_gem_get_gtt_alignment(obj);
  2228. if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
  2229. DRM_ERROR("Invalid object alignment requested %u\n", alignment);
  2230. return -EINVAL;
  2231. }
  2232. search_free:
  2233. free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
  2234. obj->size, alignment, 0);
  2235. if (free_space != NULL) {
  2236. obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
  2237. alignment);
  2238. if (obj_priv->gtt_space != NULL) {
  2239. obj_priv->gtt_space->private = obj;
  2240. obj_priv->gtt_offset = obj_priv->gtt_space->start;
  2241. }
  2242. }
  2243. if (obj_priv->gtt_space == NULL) {
  2244. /* If the gtt is empty and we're still having trouble
  2245. * fitting our object in, we're out of memory.
  2246. */
  2247. #if WATCH_LRU
  2248. DRM_INFO("%s: GTT full, evicting something\n", __func__);
  2249. #endif
  2250. ret = i915_gem_evict_something(dev, obj->size);
  2251. if (ret)
  2252. return ret;
  2253. goto search_free;
  2254. }
  2255. #if WATCH_BUF
  2256. DRM_INFO("Binding object of size %zd at 0x%08x\n",
  2257. obj->size, obj_priv->gtt_offset);
  2258. #endif
  2259. ret = i915_gem_object_get_pages(obj, gfpmask);
  2260. if (ret) {
  2261. drm_mm_put_block(obj_priv->gtt_space);
  2262. obj_priv->gtt_space = NULL;
  2263. if (ret == -ENOMEM) {
  2264. /* first try to clear up some space from the GTT */
  2265. ret = i915_gem_evict_something(dev, obj->size);
  2266. if (ret) {
  2267. /* now try to shrink everyone else */
  2268. if (gfpmask) {
  2269. gfpmask = 0;
  2270. goto search_free;
  2271. }
  2272. return ret;
  2273. }
  2274. goto search_free;
  2275. }
  2276. return ret;
  2277. }
  2278. /* Create an AGP memory structure pointing at our pages, and bind it
  2279. * into the GTT.
  2280. */
  2281. obj_priv->agp_mem = drm_agp_bind_pages(dev,
  2282. obj_priv->pages,
  2283. obj->size >> PAGE_SHIFT,
  2284. obj_priv->gtt_offset,
  2285. obj_priv->agp_type);
  2286. if (obj_priv->agp_mem == NULL) {
  2287. i915_gem_object_put_pages(obj);
  2288. drm_mm_put_block(obj_priv->gtt_space);
  2289. obj_priv->gtt_space = NULL;
  2290. ret = i915_gem_evict_something(dev, obj->size);
  2291. if (ret)
  2292. return ret;
  2293. goto search_free;
  2294. }
  2295. atomic_inc(&dev->gtt_count);
  2296. atomic_add(obj->size, &dev->gtt_memory);
  2297. /* Assert that the object is not currently in any GPU domain. As it
  2298. * wasn't in the GTT, there shouldn't be any way it could have been in
  2299. * a GPU cache
  2300. */
  2301. BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
  2302. BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
  2303. trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
  2304. return 0;
  2305. }
  2306. void
  2307. i915_gem_clflush_object(struct drm_gem_object *obj)
  2308. {
  2309. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2310. /* If we don't have a page list set up, then we're not pinned
  2311. * to GPU, and we can ignore the cache flush because it'll happen
  2312. * again at bind time.
  2313. */
  2314. if (obj_priv->pages == NULL)
  2315. return;
  2316. trace_i915_gem_object_clflush(obj);
  2317. drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
  2318. }
  2319. /** Flushes any GPU write domain for the object if it's dirty. */
  2320. static void
  2321. i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
  2322. {
  2323. struct drm_device *dev = obj->dev;
  2324. uint32_t old_write_domain;
  2325. if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
  2326. return;
  2327. /* Queue the GPU write cache flushing we need. */
  2328. old_write_domain = obj->write_domain;
  2329. i915_gem_flush(dev, 0, obj->write_domain);
  2330. (void) i915_add_request(dev, NULL, obj->write_domain);
  2331. BUG_ON(obj->write_domain);
  2332. trace_i915_gem_object_change_domain(obj,
  2333. obj->read_domains,
  2334. old_write_domain);
  2335. }
  2336. /** Flushes the GTT write domain for the object if it's dirty. */
  2337. static void
  2338. i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
  2339. {
  2340. uint32_t old_write_domain;
  2341. if (obj->write_domain != I915_GEM_DOMAIN_GTT)
  2342. return;
  2343. /* No actual flushing is required for the GTT write domain. Writes
  2344. * to it immediately go to main memory as far as we know, so there's
  2345. * no chipset flush. It also doesn't land in render cache.
  2346. */
  2347. old_write_domain = obj->write_domain;
  2348. obj->write_domain = 0;
  2349. trace_i915_gem_object_change_domain(obj,
  2350. obj->read_domains,
  2351. old_write_domain);
  2352. }
  2353. /** Flushes the CPU write domain for the object if it's dirty. */
  2354. static void
  2355. i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
  2356. {
  2357. struct drm_device *dev = obj->dev;
  2358. uint32_t old_write_domain;
  2359. if (obj->write_domain != I915_GEM_DOMAIN_CPU)
  2360. return;
  2361. i915_gem_clflush_object(obj);
  2362. drm_agp_chipset_flush(dev);
  2363. old_write_domain = obj->write_domain;
  2364. obj->write_domain = 0;
  2365. trace_i915_gem_object_change_domain(obj,
  2366. obj->read_domains,
  2367. old_write_domain);
  2368. }
  2369. void
  2370. i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
  2371. {
  2372. switch (obj->write_domain) {
  2373. case I915_GEM_DOMAIN_GTT:
  2374. i915_gem_object_flush_gtt_write_domain(obj);
  2375. break;
  2376. case I915_GEM_DOMAIN_CPU:
  2377. i915_gem_object_flush_cpu_write_domain(obj);
  2378. break;
  2379. default:
  2380. i915_gem_object_flush_gpu_write_domain(obj);
  2381. break;
  2382. }
  2383. }
  2384. /**
  2385. * Moves a single object to the GTT read, and possibly write domain.
  2386. *
  2387. * This function returns when the move is complete, including waiting on
  2388. * flushes to occur.
  2389. */
  2390. int
  2391. i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  2392. {
  2393. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2394. uint32_t old_write_domain, old_read_domains;
  2395. int ret;
  2396. /* Not valid to be called on unbound objects. */
  2397. if (obj_priv->gtt_space == NULL)
  2398. return -EINVAL;
  2399. i915_gem_object_flush_gpu_write_domain(obj);
  2400. /* Wait on any GPU rendering and flushing to occur. */
  2401. ret = i915_gem_object_wait_rendering(obj);
  2402. if (ret != 0)
  2403. return ret;
  2404. old_write_domain = obj->write_domain;
  2405. old_read_domains = obj->read_domains;
  2406. /* If we're writing through the GTT domain, then CPU and GPU caches
  2407. * will need to be invalidated at next use.
  2408. */
  2409. if (write)
  2410. obj->read_domains &= I915_GEM_DOMAIN_GTT;
  2411. i915_gem_object_flush_cpu_write_domain(obj);
  2412. /* It should now be out of any other write domains, and we can update
  2413. * the domain values for our changes.
  2414. */
  2415. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  2416. obj->read_domains |= I915_GEM_DOMAIN_GTT;
  2417. if (write) {
  2418. obj->write_domain = I915_GEM_DOMAIN_GTT;
  2419. obj_priv->dirty = 1;
  2420. }
  2421. trace_i915_gem_object_change_domain(obj,
  2422. old_read_domains,
  2423. old_write_domain);
  2424. return 0;
  2425. }
  2426. /*
  2427. * Prepare buffer for display plane. Use uninterruptible for possible flush
  2428. * wait, as in modesetting process we're not supposed to be interrupted.
  2429. */
  2430. int
  2431. i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
  2432. {
  2433. struct drm_device *dev = obj->dev;
  2434. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2435. uint32_t old_write_domain, old_read_domains;
  2436. int ret;
  2437. /* Not valid to be called on unbound objects. */
  2438. if (obj_priv->gtt_space == NULL)
  2439. return -EINVAL;
  2440. i915_gem_object_flush_gpu_write_domain(obj);
  2441. /* Wait on any GPU rendering and flushing to occur. */
  2442. if (obj_priv->active) {
  2443. #if WATCH_BUF
  2444. DRM_INFO("%s: object %p wait for seqno %08x\n",
  2445. __func__, obj, obj_priv->last_rendering_seqno);
  2446. #endif
  2447. ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
  2448. if (ret != 0)
  2449. return ret;
  2450. }
  2451. old_write_domain = obj->write_domain;
  2452. old_read_domains = obj->read_domains;
  2453. obj->read_domains &= I915_GEM_DOMAIN_GTT;
  2454. i915_gem_object_flush_cpu_write_domain(obj);
  2455. /* It should now be out of any other write domains, and we can update
  2456. * the domain values for our changes.
  2457. */
  2458. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  2459. obj->read_domains |= I915_GEM_DOMAIN_GTT;
  2460. obj->write_domain = I915_GEM_DOMAIN_GTT;
  2461. obj_priv->dirty = 1;
  2462. trace_i915_gem_object_change_domain(obj,
  2463. old_read_domains,
  2464. old_write_domain);
  2465. return 0;
  2466. }
  2467. /**
  2468. * Moves a single object to the CPU read, and possibly write domain.
  2469. *
  2470. * This function returns when the move is complete, including waiting on
  2471. * flushes to occur.
  2472. */
  2473. static int
  2474. i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  2475. {
  2476. uint32_t old_write_domain, old_read_domains;
  2477. int ret;
  2478. i915_gem_object_flush_gpu_write_domain(obj);
  2479. /* Wait on any GPU rendering and flushing to occur. */
  2480. ret = i915_gem_object_wait_rendering(obj);
  2481. if (ret != 0)
  2482. return ret;
  2483. i915_gem_object_flush_gtt_write_domain(obj);
  2484. /* If we have a partially-valid cache of the object in the CPU,
  2485. * finish invalidating it and free the per-page flags.
  2486. */
  2487. i915_gem_object_set_to_full_cpu_read_domain(obj);
  2488. old_write_domain = obj->write_domain;
  2489. old_read_domains = obj->read_domains;
  2490. /* Flush the CPU cache if it's still invalid. */
  2491. if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
  2492. i915_gem_clflush_object(obj);
  2493. obj->read_domains |= I915_GEM_DOMAIN_CPU;
  2494. }
  2495. /* It should now be out of any other write domains, and we can update
  2496. * the domain values for our changes.
  2497. */
  2498. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  2499. /* If we're writing through the CPU, then the GPU read domains will
  2500. * need to be invalidated at next use.
  2501. */
  2502. if (write) {
  2503. obj->read_domains &= I915_GEM_DOMAIN_CPU;
  2504. obj->write_domain = I915_GEM_DOMAIN_CPU;
  2505. }
  2506. trace_i915_gem_object_change_domain(obj,
  2507. old_read_domains,
  2508. old_write_domain);
  2509. return 0;
  2510. }
  2511. /*
  2512. * Set the next domain for the specified object. This
  2513. * may not actually perform the necessary flushing/invaliding though,
  2514. * as that may want to be batched with other set_domain operations
  2515. *
  2516. * This is (we hope) the only really tricky part of gem. The goal
  2517. * is fairly simple -- track which caches hold bits of the object
  2518. * and make sure they remain coherent. A few concrete examples may
  2519. * help to explain how it works. For shorthand, we use the notation
  2520. * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
  2521. * a pair of read and write domain masks.
  2522. *
  2523. * Case 1: the batch buffer
  2524. *
  2525. * 1. Allocated
  2526. * 2. Written by CPU
  2527. * 3. Mapped to GTT
  2528. * 4. Read by GPU
  2529. * 5. Unmapped from GTT
  2530. * 6. Freed
  2531. *
  2532. * Let's take these a step at a time
  2533. *
  2534. * 1. Allocated
  2535. * Pages allocated from the kernel may still have
  2536. * cache contents, so we set them to (CPU, CPU) always.
  2537. * 2. Written by CPU (using pwrite)
  2538. * The pwrite function calls set_domain (CPU, CPU) and
  2539. * this function does nothing (as nothing changes)
  2540. * 3. Mapped by GTT
  2541. * This function asserts that the object is not
  2542. * currently in any GPU-based read or write domains
  2543. * 4. Read by GPU
  2544. * i915_gem_execbuffer calls set_domain (COMMAND, 0).
  2545. * As write_domain is zero, this function adds in the
  2546. * current read domains (CPU+COMMAND, 0).
  2547. * flush_domains is set to CPU.
  2548. * invalidate_domains is set to COMMAND
  2549. * clflush is run to get data out of the CPU caches
  2550. * then i915_dev_set_domain calls i915_gem_flush to
  2551. * emit an MI_FLUSH and drm_agp_chipset_flush
  2552. * 5. Unmapped from GTT
  2553. * i915_gem_object_unbind calls set_domain (CPU, CPU)
  2554. * flush_domains and invalidate_domains end up both zero
  2555. * so no flushing/invalidating happens
  2556. * 6. Freed
  2557. * yay, done
  2558. *
  2559. * Case 2: The shared render buffer
  2560. *
  2561. * 1. Allocated
  2562. * 2. Mapped to GTT
  2563. * 3. Read/written by GPU
  2564. * 4. set_domain to (CPU,CPU)
  2565. * 5. Read/written by CPU
  2566. * 6. Read/written by GPU
  2567. *
  2568. * 1. Allocated
  2569. * Same as last example, (CPU, CPU)
  2570. * 2. Mapped to GTT
  2571. * Nothing changes (assertions find that it is not in the GPU)
  2572. * 3. Read/written by GPU
  2573. * execbuffer calls set_domain (RENDER, RENDER)
  2574. * flush_domains gets CPU
  2575. * invalidate_domains gets GPU
  2576. * clflush (obj)
  2577. * MI_FLUSH and drm_agp_chipset_flush
  2578. * 4. set_domain (CPU, CPU)
  2579. * flush_domains gets GPU
  2580. * invalidate_domains gets CPU
  2581. * wait_rendering (obj) to make sure all drawing is complete.
  2582. * This will include an MI_FLUSH to get the data from GPU
  2583. * to memory
  2584. * clflush (obj) to invalidate the CPU cache
  2585. * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
  2586. * 5. Read/written by CPU
  2587. * cache lines are loaded and dirtied
  2588. * 6. Read written by GPU
  2589. * Same as last GPU access
  2590. *
  2591. * Case 3: The constant buffer
  2592. *
  2593. * 1. Allocated
  2594. * 2. Written by CPU
  2595. * 3. Read by GPU
  2596. * 4. Updated (written) by CPU again
  2597. * 5. Read by GPU
  2598. *
  2599. * 1. Allocated
  2600. * (CPU, CPU)
  2601. * 2. Written by CPU
  2602. * (CPU, CPU)
  2603. * 3. Read by GPU
  2604. * (CPU+RENDER, 0)
  2605. * flush_domains = CPU
  2606. * invalidate_domains = RENDER
  2607. * clflush (obj)
  2608. * MI_FLUSH
  2609. * drm_agp_chipset_flush
  2610. * 4. Updated (written) by CPU again
  2611. * (CPU, CPU)
  2612. * flush_domains = 0 (no previous write domain)
  2613. * invalidate_domains = 0 (no new read domains)
  2614. * 5. Read by GPU
  2615. * (CPU+RENDER, 0)
  2616. * flush_domains = CPU
  2617. * invalidate_domains = RENDER
  2618. * clflush (obj)
  2619. * MI_FLUSH
  2620. * drm_agp_chipset_flush
  2621. */
  2622. static void
  2623. i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
  2624. {
  2625. struct drm_device *dev = obj->dev;
  2626. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2627. uint32_t invalidate_domains = 0;
  2628. uint32_t flush_domains = 0;
  2629. uint32_t old_read_domains;
  2630. BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
  2631. BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
  2632. intel_mark_busy(dev, obj);
  2633. #if WATCH_BUF
  2634. DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
  2635. __func__, obj,
  2636. obj->read_domains, obj->pending_read_domains,
  2637. obj->write_domain, obj->pending_write_domain);
  2638. #endif
  2639. /*
  2640. * If the object isn't moving to a new write domain,
  2641. * let the object stay in multiple read domains
  2642. */
  2643. if (obj->pending_write_domain == 0)
  2644. obj->pending_read_domains |= obj->read_domains;
  2645. else
  2646. obj_priv->dirty = 1;
  2647. /*
  2648. * Flush the current write domain if
  2649. * the new read domains don't match. Invalidate
  2650. * any read domains which differ from the old
  2651. * write domain
  2652. */
  2653. if (obj->write_domain &&
  2654. obj->write_domain != obj->pending_read_domains) {
  2655. flush_domains |= obj->write_domain;
  2656. invalidate_domains |=
  2657. obj->pending_read_domains & ~obj->write_domain;
  2658. }
  2659. /*
  2660. * Invalidate any read caches which may have
  2661. * stale data. That is, any new read domains.
  2662. */
  2663. invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
  2664. if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
  2665. #if WATCH_BUF
  2666. DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
  2667. __func__, flush_domains, invalidate_domains);
  2668. #endif
  2669. i915_gem_clflush_object(obj);
  2670. }
  2671. old_read_domains = obj->read_domains;
  2672. /* The actual obj->write_domain will be updated with
  2673. * pending_write_domain after we emit the accumulated flush for all
  2674. * of our domain changes in execbuffers (which clears objects'
  2675. * write_domains). So if we have a current write domain that we
  2676. * aren't changing, set pending_write_domain to that.
  2677. */
  2678. if (flush_domains == 0 && obj->pending_write_domain == 0)
  2679. obj->pending_write_domain = obj->write_domain;
  2680. obj->read_domains = obj->pending_read_domains;
  2681. dev->invalidate_domains |= invalidate_domains;
  2682. dev->flush_domains |= flush_domains;
  2683. #if WATCH_BUF
  2684. DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
  2685. __func__,
  2686. obj->read_domains, obj->write_domain,
  2687. dev->invalidate_domains, dev->flush_domains);
  2688. #endif
  2689. trace_i915_gem_object_change_domain(obj,
  2690. old_read_domains,
  2691. obj->write_domain);
  2692. }
  2693. /**
  2694. * Moves the object from a partially CPU read to a full one.
  2695. *
  2696. * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
  2697. * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  2698. */
  2699. static void
  2700. i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
  2701. {
  2702. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2703. if (!obj_priv->page_cpu_valid)
  2704. return;
  2705. /* If we're partially in the CPU read domain, finish moving it in.
  2706. */
  2707. if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
  2708. int i;
  2709. for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
  2710. if (obj_priv->page_cpu_valid[i])
  2711. continue;
  2712. drm_clflush_pages(obj_priv->pages + i, 1);
  2713. }
  2714. }
  2715. /* Free the page_cpu_valid mappings which are now stale, whether
  2716. * or not we've got I915_GEM_DOMAIN_CPU.
  2717. */
  2718. kfree(obj_priv->page_cpu_valid);
  2719. obj_priv->page_cpu_valid = NULL;
  2720. }
  2721. /**
  2722. * Set the CPU read domain on a range of the object.
  2723. *
  2724. * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
  2725. * not entirely valid. The page_cpu_valid member of the object flags which
  2726. * pages have been flushed, and will be respected by
  2727. * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
  2728. * of the whole object.
  2729. *
  2730. * This function returns when the move is complete, including waiting on
  2731. * flushes to occur.
  2732. */
  2733. static int
  2734. i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
  2735. uint64_t offset, uint64_t size)
  2736. {
  2737. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2738. uint32_t old_read_domains;
  2739. int i, ret;
  2740. if (offset == 0 && size == obj->size)
  2741. return i915_gem_object_set_to_cpu_domain(obj, 0);
  2742. i915_gem_object_flush_gpu_write_domain(obj);
  2743. /* Wait on any GPU rendering and flushing to occur. */
  2744. ret = i915_gem_object_wait_rendering(obj);
  2745. if (ret != 0)
  2746. return ret;
  2747. i915_gem_object_flush_gtt_write_domain(obj);
  2748. /* If we're already fully in the CPU read domain, we're done. */
  2749. if (obj_priv->page_cpu_valid == NULL &&
  2750. (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
  2751. return 0;
  2752. /* Otherwise, create/clear the per-page CPU read domain flag if we're
  2753. * newly adding I915_GEM_DOMAIN_CPU
  2754. */
  2755. if (obj_priv->page_cpu_valid == NULL) {
  2756. obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
  2757. GFP_KERNEL);
  2758. if (obj_priv->page_cpu_valid == NULL)
  2759. return -ENOMEM;
  2760. } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
  2761. memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
  2762. /* Flush the cache on any pages that are still invalid from the CPU's
  2763. * perspective.
  2764. */
  2765. for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
  2766. i++) {
  2767. if (obj_priv->page_cpu_valid[i])
  2768. continue;
  2769. drm_clflush_pages(obj_priv->pages + i, 1);
  2770. obj_priv->page_cpu_valid[i] = 1;
  2771. }
  2772. /* It should now be out of any other write domains, and we can update
  2773. * the domain values for our changes.
  2774. */
  2775. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  2776. old_read_domains = obj->read_domains;
  2777. obj->read_domains |= I915_GEM_DOMAIN_CPU;
  2778. trace_i915_gem_object_change_domain(obj,
  2779. old_read_domains,
  2780. obj->write_domain);
  2781. return 0;
  2782. }
  2783. /**
  2784. * Pin an object to the GTT and evaluate the relocations landing in it.
  2785. */
  2786. static int
  2787. i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  2788. struct drm_file *file_priv,
  2789. struct drm_i915_gem_exec_object2 *entry,
  2790. struct drm_i915_gem_relocation_entry *relocs)
  2791. {
  2792. struct drm_device *dev = obj->dev;
  2793. drm_i915_private_t *dev_priv = dev->dev_private;
  2794. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2795. int i, ret;
  2796. void __iomem *reloc_page;
  2797. bool need_fence;
  2798. need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
  2799. obj_priv->tiling_mode != I915_TILING_NONE;
  2800. /* Check fence reg constraints and rebind if necessary */
  2801. if (need_fence && !i915_gem_object_fence_offset_ok(obj,
  2802. obj_priv->tiling_mode))
  2803. i915_gem_object_unbind(obj);
  2804. /* Choose the GTT offset for our buffer and put it there. */
  2805. ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
  2806. if (ret)
  2807. return ret;
  2808. /*
  2809. * Pre-965 chips need a fence register set up in order to
  2810. * properly handle blits to/from tiled surfaces.
  2811. */
  2812. if (need_fence) {
  2813. ret = i915_gem_object_get_fence_reg(obj);
  2814. if (ret != 0) {
  2815. if (ret != -EBUSY && ret != -ERESTARTSYS)
  2816. DRM_ERROR("Failure to install fence: %d\n",
  2817. ret);
  2818. i915_gem_object_unpin(obj);
  2819. return ret;
  2820. }
  2821. }
  2822. entry->offset = obj_priv->gtt_offset;
  2823. /* Apply the relocations, using the GTT aperture to avoid cache
  2824. * flushing requirements.
  2825. */
  2826. for (i = 0; i < entry->relocation_count; i++) {
  2827. struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
  2828. struct drm_gem_object *target_obj;
  2829. struct drm_i915_gem_object *target_obj_priv;
  2830. uint32_t reloc_val, reloc_offset;
  2831. uint32_t __iomem *reloc_entry;
  2832. target_obj = drm_gem_object_lookup(obj->dev, file_priv,
  2833. reloc->target_handle);
  2834. if (target_obj == NULL) {
  2835. i915_gem_object_unpin(obj);
  2836. return -EBADF;
  2837. }
  2838. target_obj_priv = target_obj->driver_private;
  2839. #if WATCH_RELOC
  2840. DRM_INFO("%s: obj %p offset %08x target %d "
  2841. "read %08x write %08x gtt %08x "
  2842. "presumed %08x delta %08x\n",
  2843. __func__,
  2844. obj,
  2845. (int) reloc->offset,
  2846. (int) reloc->target_handle,
  2847. (int) reloc->read_domains,
  2848. (int) reloc->write_domain,
  2849. (int) target_obj_priv->gtt_offset,
  2850. (int) reloc->presumed_offset,
  2851. reloc->delta);
  2852. #endif
  2853. /* The target buffer should have appeared before us in the
  2854. * exec_object list, so it should have a GTT space bound by now.
  2855. */
  2856. if (target_obj_priv->gtt_space == NULL) {
  2857. DRM_ERROR("No GTT space found for object %d\n",
  2858. reloc->target_handle);
  2859. drm_gem_object_unreference(target_obj);
  2860. i915_gem_object_unpin(obj);
  2861. return -EINVAL;
  2862. }
  2863. /* Validate that the target is in a valid r/w GPU domain */
  2864. if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
  2865. reloc->read_domains & I915_GEM_DOMAIN_CPU) {
  2866. DRM_ERROR("reloc with read/write CPU domains: "
  2867. "obj %p target %d offset %d "
  2868. "read %08x write %08x",
  2869. obj, reloc->target_handle,
  2870. (int) reloc->offset,
  2871. reloc->read_domains,
  2872. reloc->write_domain);
  2873. drm_gem_object_unreference(target_obj);
  2874. i915_gem_object_unpin(obj);
  2875. return -EINVAL;
  2876. }
  2877. if (reloc->write_domain && target_obj->pending_write_domain &&
  2878. reloc->write_domain != target_obj->pending_write_domain) {
  2879. DRM_ERROR("Write domain conflict: "
  2880. "obj %p target %d offset %d "
  2881. "new %08x old %08x\n",
  2882. obj, reloc->target_handle,
  2883. (int) reloc->offset,
  2884. reloc->write_domain,
  2885. target_obj->pending_write_domain);
  2886. drm_gem_object_unreference(target_obj);
  2887. i915_gem_object_unpin(obj);
  2888. return -EINVAL;
  2889. }
  2890. target_obj->pending_read_domains |= reloc->read_domains;
  2891. target_obj->pending_write_domain |= reloc->write_domain;
  2892. /* If the relocation already has the right value in it, no
  2893. * more work needs to be done.
  2894. */
  2895. if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
  2896. drm_gem_object_unreference(target_obj);
  2897. continue;
  2898. }
  2899. /* Check that the relocation address is valid... */
  2900. if (reloc->offset > obj->size - 4) {
  2901. DRM_ERROR("Relocation beyond object bounds: "
  2902. "obj %p target %d offset %d size %d.\n",
  2903. obj, reloc->target_handle,
  2904. (int) reloc->offset, (int) obj->size);
  2905. drm_gem_object_unreference(target_obj);
  2906. i915_gem_object_unpin(obj);
  2907. return -EINVAL;
  2908. }
  2909. if (reloc->offset & 3) {
  2910. DRM_ERROR("Relocation not 4-byte aligned: "
  2911. "obj %p target %d offset %d.\n",
  2912. obj, reloc->target_handle,
  2913. (int) reloc->offset);
  2914. drm_gem_object_unreference(target_obj);
  2915. i915_gem_object_unpin(obj);
  2916. return -EINVAL;
  2917. }
  2918. /* and points to somewhere within the target object. */
  2919. if (reloc->delta >= target_obj->size) {
  2920. DRM_ERROR("Relocation beyond target object bounds: "
  2921. "obj %p target %d delta %d size %d.\n",
  2922. obj, reloc->target_handle,
  2923. (int) reloc->delta, (int) target_obj->size);
  2924. drm_gem_object_unreference(target_obj);
  2925. i915_gem_object_unpin(obj);
  2926. return -EINVAL;
  2927. }
  2928. ret = i915_gem_object_set_to_gtt_domain(obj, 1);
  2929. if (ret != 0) {
  2930. drm_gem_object_unreference(target_obj);
  2931. i915_gem_object_unpin(obj);
  2932. return -EINVAL;
  2933. }
  2934. /* Map the page containing the relocation we're going to
  2935. * perform.
  2936. */
  2937. reloc_offset = obj_priv->gtt_offset + reloc->offset;
  2938. reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
  2939. (reloc_offset &
  2940. ~(PAGE_SIZE - 1)));
  2941. reloc_entry = (uint32_t __iomem *)(reloc_page +
  2942. (reloc_offset & (PAGE_SIZE - 1)));
  2943. reloc_val = target_obj_priv->gtt_offset + reloc->delta;
  2944. #if WATCH_BUF
  2945. DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
  2946. obj, (unsigned int) reloc->offset,
  2947. readl(reloc_entry), reloc_val);
  2948. #endif
  2949. writel(reloc_val, reloc_entry);
  2950. io_mapping_unmap_atomic(reloc_page);
  2951. /* The updated presumed offset for this entry will be
  2952. * copied back out to the user.
  2953. */
  2954. reloc->presumed_offset = target_obj_priv->gtt_offset;
  2955. drm_gem_object_unreference(target_obj);
  2956. }
  2957. #if WATCH_BUF
  2958. if (0)
  2959. i915_gem_dump_object(obj, 128, __func__, ~0);
  2960. #endif
  2961. return 0;
  2962. }
  2963. /** Dispatch a batchbuffer to the ring
  2964. */
  2965. static int
  2966. i915_dispatch_gem_execbuffer(struct drm_device *dev,
  2967. struct drm_i915_gem_execbuffer2 *exec,
  2968. struct drm_clip_rect *cliprects,
  2969. uint64_t exec_offset)
  2970. {
  2971. drm_i915_private_t *dev_priv = dev->dev_private;
  2972. int nbox = exec->num_cliprects;
  2973. int i = 0, count;
  2974. uint32_t exec_start, exec_len;
  2975. RING_LOCALS;
  2976. exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
  2977. exec_len = (uint32_t) exec->batch_len;
  2978. trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
  2979. count = nbox ? nbox : 1;
  2980. for (i = 0; i < count; i++) {
  2981. if (i < nbox) {
  2982. int ret = i915_emit_box(dev, cliprects, i,
  2983. exec->DR1, exec->DR4);
  2984. if (ret)
  2985. return ret;
  2986. }
  2987. if (IS_I830(dev) || IS_845G(dev)) {
  2988. BEGIN_LP_RING(4);
  2989. OUT_RING(MI_BATCH_BUFFER);
  2990. OUT_RING(exec_start | MI_BATCH_NON_SECURE);
  2991. OUT_RING(exec_start + exec_len - 4);
  2992. OUT_RING(0);
  2993. ADVANCE_LP_RING();
  2994. } else {
  2995. BEGIN_LP_RING(2);
  2996. if (IS_I965G(dev)) {
  2997. OUT_RING(MI_BATCH_BUFFER_START |
  2998. (2 << 6) |
  2999. MI_BATCH_NON_SECURE_I965);
  3000. OUT_RING(exec_start);
  3001. } else {
  3002. OUT_RING(MI_BATCH_BUFFER_START |
  3003. (2 << 6));
  3004. OUT_RING(exec_start | MI_BATCH_NON_SECURE);
  3005. }
  3006. ADVANCE_LP_RING();
  3007. }
  3008. }
  3009. /* XXX breadcrumb */
  3010. return 0;
  3011. }
  3012. /* Throttle our rendering by waiting until the ring has completed our requests
  3013. * emitted over 20 msec ago.
  3014. *
  3015. * Note that if we were to use the current jiffies each time around the loop,
  3016. * we wouldn't escape the function with any frames outstanding if the time to
  3017. * render a frame was over 20ms.
  3018. *
  3019. * This should get us reasonable parallelism between CPU and GPU but also
  3020. * relatively low latency when blocking on a particular request to finish.
  3021. */
  3022. static int
  3023. i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
  3024. {
  3025. struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
  3026. int ret = 0;
  3027. unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
  3028. mutex_lock(&dev->struct_mutex);
  3029. while (!list_empty(&i915_file_priv->mm.request_list)) {
  3030. struct drm_i915_gem_request *request;
  3031. request = list_first_entry(&i915_file_priv->mm.request_list,
  3032. struct drm_i915_gem_request,
  3033. client_list);
  3034. if (time_after_eq(request->emitted_jiffies, recent_enough))
  3035. break;
  3036. ret = i915_wait_request(dev, request->seqno);
  3037. if (ret != 0)
  3038. break;
  3039. }
  3040. mutex_unlock(&dev->struct_mutex);
  3041. return ret;
  3042. }
  3043. static int
  3044. i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
  3045. uint32_t buffer_count,
  3046. struct drm_i915_gem_relocation_entry **relocs)
  3047. {
  3048. uint32_t reloc_count = 0, reloc_index = 0, i;
  3049. int ret;
  3050. *relocs = NULL;
  3051. for (i = 0; i < buffer_count; i++) {
  3052. if (reloc_count + exec_list[i].relocation_count < reloc_count)
  3053. return -EINVAL;
  3054. reloc_count += exec_list[i].relocation_count;
  3055. }
  3056. *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
  3057. if (*relocs == NULL) {
  3058. DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
  3059. return -ENOMEM;
  3060. }
  3061. for (i = 0; i < buffer_count; i++) {
  3062. struct drm_i915_gem_relocation_entry __user *user_relocs;
  3063. user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
  3064. ret = copy_from_user(&(*relocs)[reloc_index],
  3065. user_relocs,
  3066. exec_list[i].relocation_count *
  3067. sizeof(**relocs));
  3068. if (ret != 0) {
  3069. drm_free_large(*relocs);
  3070. *relocs = NULL;
  3071. return -EFAULT;
  3072. }
  3073. reloc_index += exec_list[i].relocation_count;
  3074. }
  3075. return 0;
  3076. }
  3077. static int
  3078. i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
  3079. uint32_t buffer_count,
  3080. struct drm_i915_gem_relocation_entry *relocs)
  3081. {
  3082. uint32_t reloc_count = 0, i;
  3083. int ret = 0;
  3084. if (relocs == NULL)
  3085. return 0;
  3086. for (i = 0; i < buffer_count; i++) {
  3087. struct drm_i915_gem_relocation_entry __user *user_relocs;
  3088. int unwritten;
  3089. user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
  3090. unwritten = copy_to_user(user_relocs,
  3091. &relocs[reloc_count],
  3092. exec_list[i].relocation_count *
  3093. sizeof(*relocs));
  3094. if (unwritten) {
  3095. ret = -EFAULT;
  3096. goto err;
  3097. }
  3098. reloc_count += exec_list[i].relocation_count;
  3099. }
  3100. err:
  3101. drm_free_large(relocs);
  3102. return ret;
  3103. }
  3104. static int
  3105. i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
  3106. uint64_t exec_offset)
  3107. {
  3108. uint32_t exec_start, exec_len;
  3109. exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
  3110. exec_len = (uint32_t) exec->batch_len;
  3111. if ((exec_start | exec_len) & 0x7)
  3112. return -EINVAL;
  3113. if (!exec_start)
  3114. return -EINVAL;
  3115. return 0;
  3116. }
  3117. static int
  3118. i915_gem_wait_for_pending_flip(struct drm_device *dev,
  3119. struct drm_gem_object **object_list,
  3120. int count)
  3121. {
  3122. drm_i915_private_t *dev_priv = dev->dev_private;
  3123. struct drm_i915_gem_object *obj_priv;
  3124. DEFINE_WAIT(wait);
  3125. int i, ret = 0;
  3126. for (;;) {
  3127. prepare_to_wait(&dev_priv->pending_flip_queue,
  3128. &wait, TASK_INTERRUPTIBLE);
  3129. for (i = 0; i < count; i++) {
  3130. obj_priv = object_list[i]->driver_private;
  3131. if (atomic_read(&obj_priv->pending_flip) > 0)
  3132. break;
  3133. }
  3134. if (i == count)
  3135. break;
  3136. if (!signal_pending(current)) {
  3137. mutex_unlock(&dev->struct_mutex);
  3138. schedule();
  3139. mutex_lock(&dev->struct_mutex);
  3140. continue;
  3141. }
  3142. ret = -ERESTARTSYS;
  3143. break;
  3144. }
  3145. finish_wait(&dev_priv->pending_flip_queue, &wait);
  3146. return ret;
  3147. }
  3148. int
  3149. i915_gem_do_execbuffer(struct drm_device *dev, void *data,
  3150. struct drm_file *file_priv,
  3151. struct drm_i915_gem_execbuffer2 *args,
  3152. struct drm_i915_gem_exec_object2 *exec_list)
  3153. {
  3154. drm_i915_private_t *dev_priv = dev->dev_private;
  3155. struct drm_gem_object **object_list = NULL;
  3156. struct drm_gem_object *batch_obj;
  3157. struct drm_i915_gem_object *obj_priv;
  3158. struct drm_clip_rect *cliprects = NULL;
  3159. struct drm_i915_gem_relocation_entry *relocs = NULL;
  3160. int ret = 0, ret2, i, pinned = 0;
  3161. uint64_t exec_offset;
  3162. uint32_t seqno, flush_domains, reloc_index;
  3163. int pin_tries, flips;
  3164. #if WATCH_EXEC
  3165. DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
  3166. (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  3167. #endif
  3168. if (args->buffer_count < 1) {
  3169. DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
  3170. return -EINVAL;
  3171. }
  3172. object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
  3173. if (object_list == NULL) {
  3174. DRM_ERROR("Failed to allocate object list for %d buffers\n",
  3175. args->buffer_count);
  3176. ret = -ENOMEM;
  3177. goto pre_mutex_err;
  3178. }
  3179. if (args->num_cliprects != 0) {
  3180. cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
  3181. GFP_KERNEL);
  3182. if (cliprects == NULL) {
  3183. ret = -ENOMEM;
  3184. goto pre_mutex_err;
  3185. }
  3186. ret = copy_from_user(cliprects,
  3187. (struct drm_clip_rect __user *)
  3188. (uintptr_t) args->cliprects_ptr,
  3189. sizeof(*cliprects) * args->num_cliprects);
  3190. if (ret != 0) {
  3191. DRM_ERROR("copy %d cliprects failed: %d\n",
  3192. args->num_cliprects, ret);
  3193. goto pre_mutex_err;
  3194. }
  3195. }
  3196. ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
  3197. &relocs);
  3198. if (ret != 0)
  3199. goto pre_mutex_err;
  3200. mutex_lock(&dev->struct_mutex);
  3201. i915_verify_inactive(dev, __FILE__, __LINE__);
  3202. if (atomic_read(&dev_priv->mm.wedged)) {
  3203. mutex_unlock(&dev->struct_mutex);
  3204. ret = -EIO;
  3205. goto pre_mutex_err;
  3206. }
  3207. if (dev_priv->mm.suspended) {
  3208. mutex_unlock(&dev->struct_mutex);
  3209. ret = -EBUSY;
  3210. goto pre_mutex_err;
  3211. }
  3212. /* Look up object handles */
  3213. flips = 0;
  3214. for (i = 0; i < args->buffer_count; i++) {
  3215. object_list[i] = drm_gem_object_lookup(dev, file_priv,
  3216. exec_list[i].handle);
  3217. if (object_list[i] == NULL) {
  3218. DRM_ERROR("Invalid object handle %d at index %d\n",
  3219. exec_list[i].handle, i);
  3220. /* prevent error path from reading uninitialized data */
  3221. args->buffer_count = i + 1;
  3222. ret = -EBADF;
  3223. goto err;
  3224. }
  3225. obj_priv = object_list[i]->driver_private;
  3226. if (obj_priv->in_execbuffer) {
  3227. DRM_ERROR("Object %p appears more than once in object list\n",
  3228. object_list[i]);
  3229. /* prevent error path from reading uninitialized data */
  3230. args->buffer_count = i + 1;
  3231. ret = -EBADF;
  3232. goto err;
  3233. }
  3234. obj_priv->in_execbuffer = true;
  3235. flips += atomic_read(&obj_priv->pending_flip);
  3236. }
  3237. if (flips > 0) {
  3238. ret = i915_gem_wait_for_pending_flip(dev, object_list,
  3239. args->buffer_count);
  3240. if (ret)
  3241. goto err;
  3242. }
  3243. /* Pin and relocate */
  3244. for (pin_tries = 0; ; pin_tries++) {
  3245. ret = 0;
  3246. reloc_index = 0;
  3247. for (i = 0; i < args->buffer_count; i++) {
  3248. object_list[i]->pending_read_domains = 0;
  3249. object_list[i]->pending_write_domain = 0;
  3250. ret = i915_gem_object_pin_and_relocate(object_list[i],
  3251. file_priv,
  3252. &exec_list[i],
  3253. &relocs[reloc_index]);
  3254. if (ret)
  3255. break;
  3256. pinned = i + 1;
  3257. reloc_index += exec_list[i].relocation_count;
  3258. }
  3259. /* success */
  3260. if (ret == 0)
  3261. break;
  3262. /* error other than GTT full, or we've already tried again */
  3263. if (ret != -ENOSPC || pin_tries >= 1) {
  3264. if (ret != -ERESTARTSYS) {
  3265. unsigned long long total_size = 0;
  3266. for (i = 0; i < args->buffer_count; i++)
  3267. total_size += object_list[i]->size;
  3268. DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
  3269. pinned+1, args->buffer_count,
  3270. total_size, ret);
  3271. DRM_ERROR("%d objects [%d pinned], "
  3272. "%d object bytes [%d pinned], "
  3273. "%d/%d gtt bytes\n",
  3274. atomic_read(&dev->object_count),
  3275. atomic_read(&dev->pin_count),
  3276. atomic_read(&dev->object_memory),
  3277. atomic_read(&dev->pin_memory),
  3278. atomic_read(&dev->gtt_memory),
  3279. dev->gtt_total);
  3280. }
  3281. goto err;
  3282. }
  3283. /* unpin all of our buffers */
  3284. for (i = 0; i < pinned; i++)
  3285. i915_gem_object_unpin(object_list[i]);
  3286. pinned = 0;
  3287. /* evict everyone we can from the aperture */
  3288. ret = i915_gem_evict_everything(dev);
  3289. if (ret && ret != -ENOSPC)
  3290. goto err;
  3291. }
  3292. /* Set the pending read domains for the batch buffer to COMMAND */
  3293. batch_obj = object_list[args->buffer_count-1];
  3294. if (batch_obj->pending_write_domain) {
  3295. DRM_ERROR("Attempting to use self-modifying batch buffer\n");
  3296. ret = -EINVAL;
  3297. goto err;
  3298. }
  3299. batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
  3300. /* Sanity check the batch buffer, prior to moving objects */
  3301. exec_offset = exec_list[args->buffer_count - 1].offset;
  3302. ret = i915_gem_check_execbuffer (args, exec_offset);
  3303. if (ret != 0) {
  3304. DRM_ERROR("execbuf with invalid offset/length\n");
  3305. goto err;
  3306. }
  3307. i915_verify_inactive(dev, __FILE__, __LINE__);
  3308. /* Zero the global flush/invalidate flags. These
  3309. * will be modified as new domains are computed
  3310. * for each object
  3311. */
  3312. dev->invalidate_domains = 0;
  3313. dev->flush_domains = 0;
  3314. for (i = 0; i < args->buffer_count; i++) {
  3315. struct drm_gem_object *obj = object_list[i];
  3316. /* Compute new gpu domains and update invalidate/flush */
  3317. i915_gem_object_set_to_gpu_domain(obj);
  3318. }
  3319. i915_verify_inactive(dev, __FILE__, __LINE__);
  3320. if (dev->invalidate_domains | dev->flush_domains) {
  3321. #if WATCH_EXEC
  3322. DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
  3323. __func__,
  3324. dev->invalidate_domains,
  3325. dev->flush_domains);
  3326. #endif
  3327. i915_gem_flush(dev,
  3328. dev->invalidate_domains,
  3329. dev->flush_domains);
  3330. if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
  3331. (void)i915_add_request(dev, file_priv,
  3332. dev->flush_domains);
  3333. }
  3334. for (i = 0; i < args->buffer_count; i++) {
  3335. struct drm_gem_object *obj = object_list[i];
  3336. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  3337. uint32_t old_write_domain = obj->write_domain;
  3338. obj->write_domain = obj->pending_write_domain;
  3339. if (obj->write_domain)
  3340. list_move_tail(&obj_priv->gpu_write_list,
  3341. &dev_priv->mm.gpu_write_list);
  3342. else
  3343. list_del_init(&obj_priv->gpu_write_list);
  3344. trace_i915_gem_object_change_domain(obj,
  3345. obj->read_domains,
  3346. old_write_domain);
  3347. }
  3348. i915_verify_inactive(dev, __FILE__, __LINE__);
  3349. #if WATCH_COHERENCY
  3350. for (i = 0; i < args->buffer_count; i++) {
  3351. i915_gem_object_check_coherency(object_list[i],
  3352. exec_list[i].handle);
  3353. }
  3354. #endif
  3355. #if WATCH_EXEC
  3356. i915_gem_dump_object(batch_obj,
  3357. args->batch_len,
  3358. __func__,
  3359. ~0);
  3360. #endif
  3361. /* Exec the batchbuffer */
  3362. ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
  3363. if (ret) {
  3364. DRM_ERROR("dispatch failed %d\n", ret);
  3365. goto err;
  3366. }
  3367. /*
  3368. * Ensure that the commands in the batch buffer are
  3369. * finished before the interrupt fires
  3370. */
  3371. flush_domains = i915_retire_commands(dev);
  3372. i915_verify_inactive(dev, __FILE__, __LINE__);
  3373. /*
  3374. * Get a seqno representing the execution of the current buffer,
  3375. * which we can wait on. We would like to mitigate these interrupts,
  3376. * likely by only creating seqnos occasionally (so that we have
  3377. * *some* interrupts representing completion of buffers that we can
  3378. * wait on when trying to clear up gtt space).
  3379. */
  3380. seqno = i915_add_request(dev, file_priv, flush_domains);
  3381. BUG_ON(seqno == 0);
  3382. for (i = 0; i < args->buffer_count; i++) {
  3383. struct drm_gem_object *obj = object_list[i];
  3384. i915_gem_object_move_to_active(obj, seqno);
  3385. #if WATCH_LRU
  3386. DRM_INFO("%s: move to exec list %p\n", __func__, obj);
  3387. #endif
  3388. }
  3389. #if WATCH_LRU
  3390. i915_dump_lru(dev, __func__);
  3391. #endif
  3392. i915_verify_inactive(dev, __FILE__, __LINE__);
  3393. err:
  3394. for (i = 0; i < pinned; i++)
  3395. i915_gem_object_unpin(object_list[i]);
  3396. for (i = 0; i < args->buffer_count; i++) {
  3397. if (object_list[i]) {
  3398. obj_priv = object_list[i]->driver_private;
  3399. obj_priv->in_execbuffer = false;
  3400. }
  3401. drm_gem_object_unreference(object_list[i]);
  3402. }
  3403. mutex_unlock(&dev->struct_mutex);
  3404. pre_mutex_err:
  3405. /* Copy the updated relocations out regardless of current error
  3406. * state. Failure to update the relocs would mean that the next
  3407. * time userland calls execbuf, it would do so with presumed offset
  3408. * state that didn't match the actual object state.
  3409. */
  3410. ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
  3411. relocs);
  3412. if (ret2 != 0) {
  3413. DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
  3414. if (ret == 0)
  3415. ret = ret2;
  3416. }
  3417. drm_free_large(object_list);
  3418. kfree(cliprects);
  3419. return ret;
  3420. }
  3421. /*
  3422. * Legacy execbuffer just creates an exec2 list from the original exec object
  3423. * list array and passes it to the real function.
  3424. */
  3425. int
  3426. i915_gem_execbuffer(struct drm_device *dev, void *data,
  3427. struct drm_file *file_priv)
  3428. {
  3429. struct drm_i915_gem_execbuffer *args = data;
  3430. struct drm_i915_gem_execbuffer2 exec2;
  3431. struct drm_i915_gem_exec_object *exec_list = NULL;
  3432. struct drm_i915_gem_exec_object2 *exec2_list = NULL;
  3433. int ret, i;
  3434. #if WATCH_EXEC
  3435. DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
  3436. (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  3437. #endif
  3438. if (args->buffer_count < 1) {
  3439. DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
  3440. return -EINVAL;
  3441. }
  3442. /* Copy in the exec list from userland */
  3443. exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
  3444. exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
  3445. if (exec_list == NULL || exec2_list == NULL) {
  3446. DRM_ERROR("Failed to allocate exec list for %d buffers\n",
  3447. args->buffer_count);
  3448. drm_free_large(exec_list);
  3449. drm_free_large(exec2_list);
  3450. return -ENOMEM;
  3451. }
  3452. ret = copy_from_user(exec_list,
  3453. (struct drm_i915_relocation_entry __user *)
  3454. (uintptr_t) args->buffers_ptr,
  3455. sizeof(*exec_list) * args->buffer_count);
  3456. if (ret != 0) {
  3457. DRM_ERROR("copy %d exec entries failed %d\n",
  3458. args->buffer_count, ret);
  3459. drm_free_large(exec_list);
  3460. drm_free_large(exec2_list);
  3461. return -EFAULT;
  3462. }
  3463. for (i = 0; i < args->buffer_count; i++) {
  3464. exec2_list[i].handle = exec_list[i].handle;
  3465. exec2_list[i].relocation_count = exec_list[i].relocation_count;
  3466. exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
  3467. exec2_list[i].alignment = exec_list[i].alignment;
  3468. exec2_list[i].offset = exec_list[i].offset;
  3469. if (!IS_I965G(dev))
  3470. exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
  3471. else
  3472. exec2_list[i].flags = 0;
  3473. }
  3474. exec2.buffers_ptr = args->buffers_ptr;
  3475. exec2.buffer_count = args->buffer_count;
  3476. exec2.batch_start_offset = args->batch_start_offset;
  3477. exec2.batch_len = args->batch_len;
  3478. exec2.DR1 = args->DR1;
  3479. exec2.DR4 = args->DR4;
  3480. exec2.num_cliprects = args->num_cliprects;
  3481. exec2.cliprects_ptr = args->cliprects_ptr;
  3482. exec2.flags = 0;
  3483. ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
  3484. if (!ret) {
  3485. /* Copy the new buffer offsets back to the user's exec list. */
  3486. for (i = 0; i < args->buffer_count; i++)
  3487. exec_list[i].offset = exec2_list[i].offset;
  3488. /* ... and back out to userspace */
  3489. ret = copy_to_user((struct drm_i915_relocation_entry __user *)
  3490. (uintptr_t) args->buffers_ptr,
  3491. exec_list,
  3492. sizeof(*exec_list) * args->buffer_count);
  3493. if (ret) {
  3494. ret = -EFAULT;
  3495. DRM_ERROR("failed to copy %d exec entries "
  3496. "back to user (%d)\n",
  3497. args->buffer_count, ret);
  3498. }
  3499. }
  3500. drm_free_large(exec_list);
  3501. drm_free_large(exec2_list);
  3502. return ret;
  3503. }
  3504. int
  3505. i915_gem_execbuffer2(struct drm_device *dev, void *data,
  3506. struct drm_file *file_priv)
  3507. {
  3508. struct drm_i915_gem_execbuffer2 *args = data;
  3509. struct drm_i915_gem_exec_object2 *exec2_list = NULL;
  3510. int ret;
  3511. #if WATCH_EXEC
  3512. DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
  3513. (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  3514. #endif
  3515. if (args->buffer_count < 1) {
  3516. DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
  3517. return -EINVAL;
  3518. }
  3519. exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
  3520. if (exec2_list == NULL) {
  3521. DRM_ERROR("Failed to allocate exec list for %d buffers\n",
  3522. args->buffer_count);
  3523. return -ENOMEM;
  3524. }
  3525. ret = copy_from_user(exec2_list,
  3526. (struct drm_i915_relocation_entry __user *)
  3527. (uintptr_t) args->buffers_ptr,
  3528. sizeof(*exec2_list) * args->buffer_count);
  3529. if (ret != 0) {
  3530. DRM_ERROR("copy %d exec entries failed %d\n",
  3531. args->buffer_count, ret);
  3532. drm_free_large(exec2_list);
  3533. return -EFAULT;
  3534. }
  3535. ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
  3536. if (!ret) {
  3537. /* Copy the new buffer offsets back to the user's exec list. */
  3538. ret = copy_to_user((struct drm_i915_relocation_entry __user *)
  3539. (uintptr_t) args->buffers_ptr,
  3540. exec2_list,
  3541. sizeof(*exec2_list) * args->buffer_count);
  3542. if (ret) {
  3543. ret = -EFAULT;
  3544. DRM_ERROR("failed to copy %d exec entries "
  3545. "back to user (%d)\n",
  3546. args->buffer_count, ret);
  3547. }
  3548. }
  3549. drm_free_large(exec2_list);
  3550. return ret;
  3551. }
  3552. int
  3553. i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
  3554. {
  3555. struct drm_device *dev = obj->dev;
  3556. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  3557. int ret;
  3558. i915_verify_inactive(dev, __FILE__, __LINE__);
  3559. if (obj_priv->gtt_space == NULL) {
  3560. ret = i915_gem_object_bind_to_gtt(obj, alignment);
  3561. if (ret)
  3562. return ret;
  3563. }
  3564. obj_priv->pin_count++;
  3565. /* If the object is not active and not pending a flush,
  3566. * remove it from the inactive list
  3567. */
  3568. if (obj_priv->pin_count == 1) {
  3569. atomic_inc(&dev->pin_count);
  3570. atomic_add(obj->size, &dev->pin_memory);
  3571. if (!obj_priv->active &&
  3572. (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
  3573. !list_empty(&obj_priv->list))
  3574. list_del_init(&obj_priv->list);
  3575. }
  3576. i915_verify_inactive(dev, __FILE__, __LINE__);
  3577. return 0;
  3578. }
  3579. void
  3580. i915_gem_object_unpin(struct drm_gem_object *obj)
  3581. {
  3582. struct drm_device *dev = obj->dev;
  3583. drm_i915_private_t *dev_priv = dev->dev_private;
  3584. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  3585. i915_verify_inactive(dev, __FILE__, __LINE__);
  3586. obj_priv->pin_count--;
  3587. BUG_ON(obj_priv->pin_count < 0);
  3588. BUG_ON(obj_priv->gtt_space == NULL);
  3589. /* If the object is no longer pinned, and is
  3590. * neither active nor being flushed, then stick it on
  3591. * the inactive list
  3592. */
  3593. if (obj_priv->pin_count == 0) {
  3594. if (!obj_priv->active &&
  3595. (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
  3596. list_move_tail(&obj_priv->list,
  3597. &dev_priv->mm.inactive_list);
  3598. atomic_dec(&dev->pin_count);
  3599. atomic_sub(obj->size, &dev->pin_memory);
  3600. }
  3601. i915_verify_inactive(dev, __FILE__, __LINE__);
  3602. }
  3603. int
  3604. i915_gem_pin_ioctl(struct drm_device *dev, void *data,
  3605. struct drm_file *file_priv)
  3606. {
  3607. struct drm_i915_gem_pin *args = data;
  3608. struct drm_gem_object *obj;
  3609. struct drm_i915_gem_object *obj_priv;
  3610. int ret;
  3611. mutex_lock(&dev->struct_mutex);
  3612. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3613. if (obj == NULL) {
  3614. DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
  3615. args->handle);
  3616. mutex_unlock(&dev->struct_mutex);
  3617. return -EBADF;
  3618. }
  3619. obj_priv = obj->driver_private;
  3620. if (obj_priv->madv != I915_MADV_WILLNEED) {
  3621. DRM_ERROR("Attempting to pin a purgeable buffer\n");
  3622. drm_gem_object_unreference(obj);
  3623. mutex_unlock(&dev->struct_mutex);
  3624. return -EINVAL;
  3625. }
  3626. if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
  3627. DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
  3628. args->handle);
  3629. drm_gem_object_unreference(obj);
  3630. mutex_unlock(&dev->struct_mutex);
  3631. return -EINVAL;
  3632. }
  3633. obj_priv->user_pin_count++;
  3634. obj_priv->pin_filp = file_priv;
  3635. if (obj_priv->user_pin_count == 1) {
  3636. ret = i915_gem_object_pin(obj, args->alignment);
  3637. if (ret != 0) {
  3638. drm_gem_object_unreference(obj);
  3639. mutex_unlock(&dev->struct_mutex);
  3640. return ret;
  3641. }
  3642. }
  3643. /* XXX - flush the CPU caches for pinned objects
  3644. * as the X server doesn't manage domains yet
  3645. */
  3646. i915_gem_object_flush_cpu_write_domain(obj);
  3647. args->offset = obj_priv->gtt_offset;
  3648. drm_gem_object_unreference(obj);
  3649. mutex_unlock(&dev->struct_mutex);
  3650. return 0;
  3651. }
  3652. int
  3653. i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
  3654. struct drm_file *file_priv)
  3655. {
  3656. struct drm_i915_gem_pin *args = data;
  3657. struct drm_gem_object *obj;
  3658. struct drm_i915_gem_object *obj_priv;
  3659. mutex_lock(&dev->struct_mutex);
  3660. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3661. if (obj == NULL) {
  3662. DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
  3663. args->handle);
  3664. mutex_unlock(&dev->struct_mutex);
  3665. return -EBADF;
  3666. }
  3667. obj_priv = obj->driver_private;
  3668. if (obj_priv->pin_filp != file_priv) {
  3669. DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
  3670. args->handle);
  3671. drm_gem_object_unreference(obj);
  3672. mutex_unlock(&dev->struct_mutex);
  3673. return -EINVAL;
  3674. }
  3675. obj_priv->user_pin_count--;
  3676. if (obj_priv->user_pin_count == 0) {
  3677. obj_priv->pin_filp = NULL;
  3678. i915_gem_object_unpin(obj);
  3679. }
  3680. drm_gem_object_unreference(obj);
  3681. mutex_unlock(&dev->struct_mutex);
  3682. return 0;
  3683. }
  3684. int
  3685. i915_gem_busy_ioctl(struct drm_device *dev, void *data,
  3686. struct drm_file *file_priv)
  3687. {
  3688. struct drm_i915_gem_busy *args = data;
  3689. struct drm_gem_object *obj;
  3690. struct drm_i915_gem_object *obj_priv;
  3691. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3692. if (obj == NULL) {
  3693. DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
  3694. args->handle);
  3695. return -EBADF;
  3696. }
  3697. mutex_lock(&dev->struct_mutex);
  3698. /* Update the active list for the hardware's current position.
  3699. * Otherwise this only updates on a delayed timer or when irqs are
  3700. * actually unmasked, and our working set ends up being larger than
  3701. * required.
  3702. */
  3703. i915_gem_retire_requests(dev);
  3704. obj_priv = obj->driver_private;
  3705. /* Don't count being on the flushing list against the object being
  3706. * done. Otherwise, a buffer left on the flushing list but not getting
  3707. * flushed (because nobody's flushing that domain) won't ever return
  3708. * unbusy and get reused by libdrm's bo cache. The other expected
  3709. * consumer of this interface, OpenGL's occlusion queries, also specs
  3710. * that the objects get unbusy "eventually" without any interference.
  3711. */
  3712. args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
  3713. drm_gem_object_unreference(obj);
  3714. mutex_unlock(&dev->struct_mutex);
  3715. return 0;
  3716. }
  3717. int
  3718. i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
  3719. struct drm_file *file_priv)
  3720. {
  3721. return i915_gem_ring_throttle(dev, file_priv);
  3722. }
  3723. int
  3724. i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
  3725. struct drm_file *file_priv)
  3726. {
  3727. struct drm_i915_gem_madvise *args = data;
  3728. struct drm_gem_object *obj;
  3729. struct drm_i915_gem_object *obj_priv;
  3730. switch (args->madv) {
  3731. case I915_MADV_DONTNEED:
  3732. case I915_MADV_WILLNEED:
  3733. break;
  3734. default:
  3735. return -EINVAL;
  3736. }
  3737. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3738. if (obj == NULL) {
  3739. DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
  3740. args->handle);
  3741. return -EBADF;
  3742. }
  3743. mutex_lock(&dev->struct_mutex);
  3744. obj_priv = obj->driver_private;
  3745. if (obj_priv->pin_count) {
  3746. drm_gem_object_unreference(obj);
  3747. mutex_unlock(&dev->struct_mutex);
  3748. DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
  3749. return -EINVAL;
  3750. }
  3751. if (obj_priv->madv != __I915_MADV_PURGED)
  3752. obj_priv->madv = args->madv;
  3753. /* if the object is no longer bound, discard its backing storage */
  3754. if (i915_gem_object_is_purgeable(obj_priv) &&
  3755. obj_priv->gtt_space == NULL)
  3756. i915_gem_object_truncate(obj);
  3757. args->retained = obj_priv->madv != __I915_MADV_PURGED;
  3758. drm_gem_object_unreference(obj);
  3759. mutex_unlock(&dev->struct_mutex);
  3760. return 0;
  3761. }
  3762. int i915_gem_init_object(struct drm_gem_object *obj)
  3763. {
  3764. struct drm_i915_gem_object *obj_priv;
  3765. obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
  3766. if (obj_priv == NULL)
  3767. return -ENOMEM;
  3768. /*
  3769. * We've just allocated pages from the kernel,
  3770. * so they've just been written by the CPU with
  3771. * zeros. They'll need to be clflushed before we
  3772. * use them with the GPU.
  3773. */
  3774. obj->write_domain = I915_GEM_DOMAIN_CPU;
  3775. obj->read_domains = I915_GEM_DOMAIN_CPU;
  3776. obj_priv->agp_type = AGP_USER_MEMORY;
  3777. obj->driver_private = obj_priv;
  3778. obj_priv->obj = obj;
  3779. obj_priv->fence_reg = I915_FENCE_REG_NONE;
  3780. INIT_LIST_HEAD(&obj_priv->list);
  3781. INIT_LIST_HEAD(&obj_priv->gpu_write_list);
  3782. INIT_LIST_HEAD(&obj_priv->fence_list);
  3783. obj_priv->madv = I915_MADV_WILLNEED;
  3784. trace_i915_gem_object_create(obj);
  3785. return 0;
  3786. }
  3787. void i915_gem_free_object(struct drm_gem_object *obj)
  3788. {
  3789. struct drm_device *dev = obj->dev;
  3790. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  3791. trace_i915_gem_object_destroy(obj);
  3792. while (obj_priv->pin_count > 0)
  3793. i915_gem_object_unpin(obj);
  3794. if (obj_priv->phys_obj)
  3795. i915_gem_detach_phys_object(dev, obj);
  3796. i915_gem_object_unbind(obj);
  3797. if (obj_priv->mmap_offset)
  3798. i915_gem_free_mmap_offset(obj);
  3799. kfree(obj_priv->page_cpu_valid);
  3800. kfree(obj_priv->bit_17);
  3801. kfree(obj->driver_private);
  3802. }
  3803. /** Unbinds all inactive objects. */
  3804. static int
  3805. i915_gem_evict_from_inactive_list(struct drm_device *dev)
  3806. {
  3807. drm_i915_private_t *dev_priv = dev->dev_private;
  3808. while (!list_empty(&dev_priv->mm.inactive_list)) {
  3809. struct drm_gem_object *obj;
  3810. int ret;
  3811. obj = list_first_entry(&dev_priv->mm.inactive_list,
  3812. struct drm_i915_gem_object,
  3813. list)->obj;
  3814. ret = i915_gem_object_unbind(obj);
  3815. if (ret != 0) {
  3816. DRM_ERROR("Error unbinding object: %d\n", ret);
  3817. return ret;
  3818. }
  3819. }
  3820. return 0;
  3821. }
  3822. int
  3823. i915_gem_idle(struct drm_device *dev)
  3824. {
  3825. drm_i915_private_t *dev_priv = dev->dev_private;
  3826. int ret;
  3827. mutex_lock(&dev->struct_mutex);
  3828. if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
  3829. mutex_unlock(&dev->struct_mutex);
  3830. return 0;
  3831. }
  3832. ret = i915_gpu_idle(dev);
  3833. if (ret) {
  3834. mutex_unlock(&dev->struct_mutex);
  3835. return ret;
  3836. }
  3837. /* Under UMS, be paranoid and evict. */
  3838. if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
  3839. ret = i915_gem_evict_from_inactive_list(dev);
  3840. if (ret) {
  3841. mutex_unlock(&dev->struct_mutex);
  3842. return ret;
  3843. }
  3844. }
  3845. /* Hack! Don't let anybody do execbuf while we don't control the chip.
  3846. * We need to replace this with a semaphore, or something.
  3847. * And not confound mm.suspended!
  3848. */
  3849. dev_priv->mm.suspended = 1;
  3850. del_timer(&dev_priv->hangcheck_timer);
  3851. i915_kernel_lost_context(dev);
  3852. i915_gem_cleanup_ringbuffer(dev);
  3853. mutex_unlock(&dev->struct_mutex);
  3854. /* Cancel the retire work handler, which should be idle now. */
  3855. cancel_delayed_work_sync(&dev_priv->mm.retire_work);
  3856. return 0;
  3857. }
  3858. static int
  3859. i915_gem_init_hws(struct drm_device *dev)
  3860. {
  3861. drm_i915_private_t *dev_priv = dev->dev_private;
  3862. struct drm_gem_object *obj;
  3863. struct drm_i915_gem_object *obj_priv;
  3864. int ret;
  3865. /* If we need a physical address for the status page, it's already
  3866. * initialized at driver load time.
  3867. */
  3868. if (!I915_NEED_GFX_HWS(dev))
  3869. return 0;
  3870. obj = drm_gem_object_alloc(dev, 4096);
  3871. if (obj == NULL) {
  3872. DRM_ERROR("Failed to allocate status page\n");
  3873. return -ENOMEM;
  3874. }
  3875. obj_priv = obj->driver_private;
  3876. obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
  3877. ret = i915_gem_object_pin(obj, 4096);
  3878. if (ret != 0) {
  3879. drm_gem_object_unreference(obj);
  3880. return ret;
  3881. }
  3882. dev_priv->status_gfx_addr = obj_priv->gtt_offset;
  3883. dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
  3884. if (dev_priv->hw_status_page == NULL) {
  3885. DRM_ERROR("Failed to map status page.\n");
  3886. memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
  3887. i915_gem_object_unpin(obj);
  3888. drm_gem_object_unreference(obj);
  3889. return -EINVAL;
  3890. }
  3891. dev_priv->hws_obj = obj;
  3892. memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
  3893. if (IS_GEN6(dev)) {
  3894. I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
  3895. I915_READ(HWS_PGA_GEN6); /* posting read */
  3896. } else {
  3897. I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
  3898. I915_READ(HWS_PGA); /* posting read */
  3899. }
  3900. DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
  3901. return 0;
  3902. }
  3903. static void
  3904. i915_gem_cleanup_hws(struct drm_device *dev)
  3905. {
  3906. drm_i915_private_t *dev_priv = dev->dev_private;
  3907. struct drm_gem_object *obj;
  3908. struct drm_i915_gem_object *obj_priv;
  3909. if (dev_priv->hws_obj == NULL)
  3910. return;
  3911. obj = dev_priv->hws_obj;
  3912. obj_priv = obj->driver_private;
  3913. kunmap(obj_priv->pages[0]);
  3914. i915_gem_object_unpin(obj);
  3915. drm_gem_object_unreference(obj);
  3916. dev_priv->hws_obj = NULL;
  3917. memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
  3918. dev_priv->hw_status_page = NULL;
  3919. /* Write high address into HWS_PGA when disabling. */
  3920. I915_WRITE(HWS_PGA, 0x1ffff000);
  3921. }
  3922. int
  3923. i915_gem_init_ringbuffer(struct drm_device *dev)
  3924. {
  3925. drm_i915_private_t *dev_priv = dev->dev_private;
  3926. struct drm_gem_object *obj;
  3927. struct drm_i915_gem_object *obj_priv;
  3928. drm_i915_ring_buffer_t *ring = &dev_priv->ring;
  3929. int ret;
  3930. u32 head;
  3931. ret = i915_gem_init_hws(dev);
  3932. if (ret != 0)
  3933. return ret;
  3934. obj = drm_gem_object_alloc(dev, 128 * 1024);
  3935. if (obj == NULL) {
  3936. DRM_ERROR("Failed to allocate ringbuffer\n");
  3937. i915_gem_cleanup_hws(dev);
  3938. return -ENOMEM;
  3939. }
  3940. obj_priv = obj->driver_private;
  3941. ret = i915_gem_object_pin(obj, 4096);
  3942. if (ret != 0) {
  3943. drm_gem_object_unreference(obj);
  3944. i915_gem_cleanup_hws(dev);
  3945. return ret;
  3946. }
  3947. /* Set up the kernel mapping for the ring. */
  3948. ring->Size = obj->size;
  3949. ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
  3950. ring->map.size = obj->size;
  3951. ring->map.type = 0;
  3952. ring->map.flags = 0;
  3953. ring->map.mtrr = 0;
  3954. drm_core_ioremap_wc(&ring->map, dev);
  3955. if (ring->map.handle == NULL) {
  3956. DRM_ERROR("Failed to map ringbuffer.\n");
  3957. memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
  3958. i915_gem_object_unpin(obj);
  3959. drm_gem_object_unreference(obj);
  3960. i915_gem_cleanup_hws(dev);
  3961. return -EINVAL;
  3962. }
  3963. ring->ring_obj = obj;
  3964. ring->virtual_start = ring->map.handle;
  3965. /* Stop the ring if it's running. */
  3966. I915_WRITE(PRB0_CTL, 0);
  3967. I915_WRITE(PRB0_TAIL, 0);
  3968. I915_WRITE(PRB0_HEAD, 0);
  3969. /* Initialize the ring. */
  3970. I915_WRITE(PRB0_START, obj_priv->gtt_offset);
  3971. head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
  3972. /* G45 ring initialization fails to reset head to zero */
  3973. if (head != 0) {
  3974. DRM_ERROR("Ring head not reset to zero "
  3975. "ctl %08x head %08x tail %08x start %08x\n",
  3976. I915_READ(PRB0_CTL),
  3977. I915_READ(PRB0_HEAD),
  3978. I915_READ(PRB0_TAIL),
  3979. I915_READ(PRB0_START));
  3980. I915_WRITE(PRB0_HEAD, 0);
  3981. DRM_ERROR("Ring head forced to zero "
  3982. "ctl %08x head %08x tail %08x start %08x\n",
  3983. I915_READ(PRB0_CTL),
  3984. I915_READ(PRB0_HEAD),
  3985. I915_READ(PRB0_TAIL),
  3986. I915_READ(PRB0_START));
  3987. }
  3988. I915_WRITE(PRB0_CTL,
  3989. ((obj->size - 4096) & RING_NR_PAGES) |
  3990. RING_NO_REPORT |
  3991. RING_VALID);
  3992. head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
  3993. /* If the head is still not zero, the ring is dead */
  3994. if (head != 0) {
  3995. DRM_ERROR("Ring initialization failed "
  3996. "ctl %08x head %08x tail %08x start %08x\n",
  3997. I915_READ(PRB0_CTL),
  3998. I915_READ(PRB0_HEAD),
  3999. I915_READ(PRB0_TAIL),
  4000. I915_READ(PRB0_START));
  4001. return -EIO;
  4002. }
  4003. /* Update our cache of the ring state */
  4004. if (!drm_core_check_feature(dev, DRIVER_MODESET))
  4005. i915_kernel_lost_context(dev);
  4006. else {
  4007. ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
  4008. ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
  4009. ring->space = ring->head - (ring->tail + 8);
  4010. if (ring->space < 0)
  4011. ring->space += ring->Size;
  4012. }
  4013. return 0;
  4014. }
  4015. void
  4016. i915_gem_cleanup_ringbuffer(struct drm_device *dev)
  4017. {
  4018. drm_i915_private_t *dev_priv = dev->dev_private;
  4019. if (dev_priv->ring.ring_obj == NULL)
  4020. return;
  4021. drm_core_ioremapfree(&dev_priv->ring.map, dev);
  4022. i915_gem_object_unpin(dev_priv->ring.ring_obj);
  4023. drm_gem_object_unreference(dev_priv->ring.ring_obj);
  4024. dev_priv->ring.ring_obj = NULL;
  4025. memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
  4026. i915_gem_cleanup_hws(dev);
  4027. }
  4028. int
  4029. i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
  4030. struct drm_file *file_priv)
  4031. {
  4032. drm_i915_private_t *dev_priv = dev->dev_private;
  4033. int ret;
  4034. if (drm_core_check_feature(dev, DRIVER_MODESET))
  4035. return 0;
  4036. if (atomic_read(&dev_priv->mm.wedged)) {
  4037. DRM_ERROR("Reenabling wedged hardware, good luck\n");
  4038. atomic_set(&dev_priv->mm.wedged, 0);
  4039. }
  4040. mutex_lock(&dev->struct_mutex);
  4041. dev_priv->mm.suspended = 0;
  4042. ret = i915_gem_init_ringbuffer(dev);
  4043. if (ret != 0) {
  4044. mutex_unlock(&dev->struct_mutex);
  4045. return ret;
  4046. }
  4047. spin_lock(&dev_priv->mm.active_list_lock);
  4048. BUG_ON(!list_empty(&dev_priv->mm.active_list));
  4049. spin_unlock(&dev_priv->mm.active_list_lock);
  4050. BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
  4051. BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
  4052. BUG_ON(!list_empty(&dev_priv->mm.request_list));
  4053. mutex_unlock(&dev->struct_mutex);
  4054. drm_irq_install(dev);
  4055. return 0;
  4056. }
  4057. int
  4058. i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
  4059. struct drm_file *file_priv)
  4060. {
  4061. if (drm_core_check_feature(dev, DRIVER_MODESET))
  4062. return 0;
  4063. drm_irq_uninstall(dev);
  4064. return i915_gem_idle(dev);
  4065. }
  4066. void
  4067. i915_gem_lastclose(struct drm_device *dev)
  4068. {
  4069. int ret;
  4070. if (drm_core_check_feature(dev, DRIVER_MODESET))
  4071. return;
  4072. ret = i915_gem_idle(dev);
  4073. if (ret)
  4074. DRM_ERROR("failed to idle hardware: %d\n", ret);
  4075. }
  4076. void
  4077. i915_gem_load(struct drm_device *dev)
  4078. {
  4079. int i;
  4080. drm_i915_private_t *dev_priv = dev->dev_private;
  4081. spin_lock_init(&dev_priv->mm.active_list_lock);
  4082. INIT_LIST_HEAD(&dev_priv->mm.active_list);
  4083. INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
  4084. INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
  4085. INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
  4086. INIT_LIST_HEAD(&dev_priv->mm.request_list);
  4087. INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  4088. INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
  4089. i915_gem_retire_work_handler);
  4090. dev_priv->mm.next_gem_seqno = 1;
  4091. spin_lock(&shrink_list_lock);
  4092. list_add(&dev_priv->mm.shrink_list, &shrink_list);
  4093. spin_unlock(&shrink_list_lock);
  4094. /* Old X drivers will take 0-2 for front, back, depth buffers */
  4095. if (!drm_core_check_feature(dev, DRIVER_MODESET))
  4096. dev_priv->fence_reg_start = 3;
  4097. if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
  4098. dev_priv->num_fence_regs = 16;
  4099. else
  4100. dev_priv->num_fence_regs = 8;
  4101. /* Initialize fence registers to zero */
  4102. if (IS_I965G(dev)) {
  4103. for (i = 0; i < 16; i++)
  4104. I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
  4105. } else {
  4106. for (i = 0; i < 8; i++)
  4107. I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
  4108. if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
  4109. for (i = 0; i < 8; i++)
  4110. I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
  4111. }
  4112. i915_gem_detect_bit_6_swizzle(dev);
  4113. init_waitqueue_head(&dev_priv->pending_flip_queue);
  4114. }
  4115. /*
  4116. * Create a physically contiguous memory object for this object
  4117. * e.g. for cursor + overlay regs
  4118. */
  4119. int i915_gem_init_phys_object(struct drm_device *dev,
  4120. int id, int size)
  4121. {
  4122. drm_i915_private_t *dev_priv = dev->dev_private;
  4123. struct drm_i915_gem_phys_object *phys_obj;
  4124. int ret;
  4125. if (dev_priv->mm.phys_objs[id - 1] || !size)
  4126. return 0;
  4127. phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
  4128. if (!phys_obj)
  4129. return -ENOMEM;
  4130. phys_obj->id = id;
  4131. phys_obj->handle = drm_pci_alloc(dev, size, 0);
  4132. if (!phys_obj->handle) {
  4133. ret = -ENOMEM;
  4134. goto kfree_obj;
  4135. }
  4136. #ifdef CONFIG_X86
  4137. set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  4138. #endif
  4139. dev_priv->mm.phys_objs[id - 1] = phys_obj;
  4140. return 0;
  4141. kfree_obj:
  4142. kfree(phys_obj);
  4143. return ret;
  4144. }
  4145. void i915_gem_free_phys_object(struct drm_device *dev, int id)
  4146. {
  4147. drm_i915_private_t *dev_priv = dev->dev_private;
  4148. struct drm_i915_gem_phys_object *phys_obj;
  4149. if (!dev_priv->mm.phys_objs[id - 1])
  4150. return;
  4151. phys_obj = dev_priv->mm.phys_objs[id - 1];
  4152. if (phys_obj->cur_obj) {
  4153. i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
  4154. }
  4155. #ifdef CONFIG_X86
  4156. set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  4157. #endif
  4158. drm_pci_free(dev, phys_obj->handle);
  4159. kfree(phys_obj);
  4160. dev_priv->mm.phys_objs[id - 1] = NULL;
  4161. }
  4162. void i915_gem_free_all_phys_object(struct drm_device *dev)
  4163. {
  4164. int i;
  4165. for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
  4166. i915_gem_free_phys_object(dev, i);
  4167. }
  4168. void i915_gem_detach_phys_object(struct drm_device *dev,
  4169. struct drm_gem_object *obj)
  4170. {
  4171. struct drm_i915_gem_object *obj_priv;
  4172. int i;
  4173. int ret;
  4174. int page_count;
  4175. obj_priv = obj->driver_private;
  4176. if (!obj_priv->phys_obj)
  4177. return;
  4178. ret = i915_gem_object_get_pages(obj, 0);
  4179. if (ret)
  4180. goto out;
  4181. page_count = obj->size / PAGE_SIZE;
  4182. for (i = 0; i < page_count; i++) {
  4183. char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
  4184. char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  4185. memcpy(dst, src, PAGE_SIZE);
  4186. kunmap_atomic(dst, KM_USER0);
  4187. }
  4188. drm_clflush_pages(obj_priv->pages, page_count);
  4189. drm_agp_chipset_flush(dev);
  4190. i915_gem_object_put_pages(obj);
  4191. out:
  4192. obj_priv->phys_obj->cur_obj = NULL;
  4193. obj_priv->phys_obj = NULL;
  4194. }
  4195. int
  4196. i915_gem_attach_phys_object(struct drm_device *dev,
  4197. struct drm_gem_object *obj, int id)
  4198. {
  4199. drm_i915_private_t *dev_priv = dev->dev_private;
  4200. struct drm_i915_gem_object *obj_priv;
  4201. int ret = 0;
  4202. int page_count;
  4203. int i;
  4204. if (id > I915_MAX_PHYS_OBJECT)
  4205. return -EINVAL;
  4206. obj_priv = obj->driver_private;
  4207. if (obj_priv->phys_obj) {
  4208. if (obj_priv->phys_obj->id == id)
  4209. return 0;
  4210. i915_gem_detach_phys_object(dev, obj);
  4211. }
  4212. /* create a new object */
  4213. if (!dev_priv->mm.phys_objs[id - 1]) {
  4214. ret = i915_gem_init_phys_object(dev, id,
  4215. obj->size);
  4216. if (ret) {
  4217. DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
  4218. goto out;
  4219. }
  4220. }
  4221. /* bind to the object */
  4222. obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
  4223. obj_priv->phys_obj->cur_obj = obj;
  4224. ret = i915_gem_object_get_pages(obj, 0);
  4225. if (ret) {
  4226. DRM_ERROR("failed to get page list\n");
  4227. goto out;
  4228. }
  4229. page_count = obj->size / PAGE_SIZE;
  4230. for (i = 0; i < page_count; i++) {
  4231. char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
  4232. char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  4233. memcpy(dst, src, PAGE_SIZE);
  4234. kunmap_atomic(src, KM_USER0);
  4235. }
  4236. i915_gem_object_put_pages(obj);
  4237. return 0;
  4238. out:
  4239. return ret;
  4240. }
  4241. static int
  4242. i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
  4243. struct drm_i915_gem_pwrite *args,
  4244. struct drm_file *file_priv)
  4245. {
  4246. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  4247. void *obj_addr;
  4248. int ret;
  4249. char __user *user_data;
  4250. user_data = (char __user *) (uintptr_t) args->data_ptr;
  4251. obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
  4252. DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
  4253. ret = copy_from_user(obj_addr, user_data, args->size);
  4254. if (ret)
  4255. return -EFAULT;
  4256. drm_agp_chipset_flush(dev);
  4257. return 0;
  4258. }
  4259. void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
  4260. {
  4261. struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
  4262. /* Clean up our request list when the client is going away, so that
  4263. * later retire_requests won't dereference our soon-to-be-gone
  4264. * file_priv.
  4265. */
  4266. mutex_lock(&dev->struct_mutex);
  4267. while (!list_empty(&i915_file_priv->mm.request_list))
  4268. list_del_init(i915_file_priv->mm.request_list.next);
  4269. mutex_unlock(&dev->struct_mutex);
  4270. }
  4271. static int
  4272. i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
  4273. {
  4274. drm_i915_private_t *dev_priv, *next_dev;
  4275. struct drm_i915_gem_object *obj_priv, *next_obj;
  4276. int cnt = 0;
  4277. int would_deadlock = 1;
  4278. /* "fast-path" to count number of available objects */
  4279. if (nr_to_scan == 0) {
  4280. spin_lock(&shrink_list_lock);
  4281. list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
  4282. struct drm_device *dev = dev_priv->dev;
  4283. if (mutex_trylock(&dev->struct_mutex)) {
  4284. list_for_each_entry(obj_priv,
  4285. &dev_priv->mm.inactive_list,
  4286. list)
  4287. cnt++;
  4288. mutex_unlock(&dev->struct_mutex);
  4289. }
  4290. }
  4291. spin_unlock(&shrink_list_lock);
  4292. return (cnt / 100) * sysctl_vfs_cache_pressure;
  4293. }
  4294. spin_lock(&shrink_list_lock);
  4295. /* first scan for clean buffers */
  4296. list_for_each_entry_safe(dev_priv, next_dev,
  4297. &shrink_list, mm.shrink_list) {
  4298. struct drm_device *dev = dev_priv->dev;
  4299. if (! mutex_trylock(&dev->struct_mutex))
  4300. continue;
  4301. spin_unlock(&shrink_list_lock);
  4302. i915_gem_retire_requests(dev);
  4303. list_for_each_entry_safe(obj_priv, next_obj,
  4304. &dev_priv->mm.inactive_list,
  4305. list) {
  4306. if (i915_gem_object_is_purgeable(obj_priv)) {
  4307. i915_gem_object_unbind(obj_priv->obj);
  4308. if (--nr_to_scan <= 0)
  4309. break;
  4310. }
  4311. }
  4312. spin_lock(&shrink_list_lock);
  4313. mutex_unlock(&dev->struct_mutex);
  4314. would_deadlock = 0;
  4315. if (nr_to_scan <= 0)
  4316. break;
  4317. }
  4318. /* second pass, evict/count anything still on the inactive list */
  4319. list_for_each_entry_safe(dev_priv, next_dev,
  4320. &shrink_list, mm.shrink_list) {
  4321. struct drm_device *dev = dev_priv->dev;
  4322. if (! mutex_trylock(&dev->struct_mutex))
  4323. continue;
  4324. spin_unlock(&shrink_list_lock);
  4325. list_for_each_entry_safe(obj_priv, next_obj,
  4326. &dev_priv->mm.inactive_list,
  4327. list) {
  4328. if (nr_to_scan > 0) {
  4329. i915_gem_object_unbind(obj_priv->obj);
  4330. nr_to_scan--;
  4331. } else
  4332. cnt++;
  4333. }
  4334. spin_lock(&shrink_list_lock);
  4335. mutex_unlock(&dev->struct_mutex);
  4336. would_deadlock = 0;
  4337. }
  4338. spin_unlock(&shrink_list_lock);
  4339. if (would_deadlock)
  4340. return -1;
  4341. else if (cnt > 0)
  4342. return (cnt / 100) * sysctl_vfs_cache_pressure;
  4343. else
  4344. return 0;
  4345. }
  4346. static struct shrinker shrinker = {
  4347. .shrink = i915_gem_shrink,
  4348. .seeks = DEFAULT_SEEKS,
  4349. };
  4350. __init void
  4351. i915_gem_shrinker_init(void)
  4352. {
  4353. register_shrinker(&shrinker);
  4354. }
  4355. __exit void
  4356. i915_gem_shrinker_exit(void)
  4357. {
  4358. unregister_shrinker(&shrinker);
  4359. }