i915_gem.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>
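
/* Mask of the GEM domains not directly accessible by the CPU
 * (everything except the CPU and GTT domains).
 */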
#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev, int min_size);
static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
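
/**
 * Set up the range of the GTT that GEM may allocate from: reject
 * unaligned or inverted ranges, hand [start, end) to the drm_mm
 * allocator, and record the total size in dev->gtt_total.
 */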
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}
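
/**
 * GEM_INIT ioctl: lets userspace specify the GTT range available to GEM.
 * Simply wraps i915_gem_do_init() under struct_mutex.
 */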
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
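
/**
 * GEM_GET_APERTURE ioctl: reports the total aperture size managed by GEM
 * and a rough figure for how much of it is still available (total minus
 * currently pinned memory).
 */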
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
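
/* Atomically kmap a single backing page and copy from it into the user
 * buffer. Returns -EFAULT if the copy would fault, letting the caller
 * fall back to the slow path.
 */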
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}
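
/* Tiled objects on 9/10/17 swizzling hardware need the bit-17 aware copy
 * paths, since bit 17 of a page's physical address is not something
 * userspace can account for by itself.
 */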
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}
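
/* Copy length bytes between two already-pinned pages via atomic kmaps;
 * the workhorse of the slow pread/pwrite paths.
 */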
static inline int
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_vaddr == NULL)
		return -ENOMEM;

	src_vaddr = kmap_atomic(src_page, KM_USER1);
	if (src_vaddr == NULL) {
		kunmap_atomic(dst_vaddr, KM_USER0);
		return -ENOMEM;
	}

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);
	kunmap_atomic(dst_vaddr, KM_USER0);

	return 0;
}
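
/* Like slow_shmem_copy, but unswizzles bit 17 on the GPU page as it
 * copies: when bit 17 of the page's physical address is set, the two
 * 64-byte halves of each 128-byte span are swapped (A6 XORed with A17).
 */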
static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
	if (gpu_vaddr == NULL)
		return -ENOMEM;

	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
	if (cpu_vaddr == NULL) {
		kunmap_atomic(gpu_vaddr, KM_USER0);
		return -ENOMEM;
	}

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap_atomic(cpu_vaddr, KM_USER1);
	kunmap_atomic(gpu_vaddr, KM_USER0);

	return 0;
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within the object's backing store
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
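
/* Read/adjust the gfp mask of the shmem mapping that backs an object;
 * used below to retry the page allocation without __GFP_NORETRY after
 * evicting old buffers.
 */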
static inline gfp_t
i915_gem_object_get_page_gfp_mask(struct drm_gem_object *obj)
{
	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
}

static inline void
i915_gem_object_set_page_gfp_mask(struct drm_gem_object *obj, gfp_t gfp)
{
	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
}
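
/* Get the object's backing pages, and on -ENOMEM try to make room by
 * evicting something from the GTT before retrying the allocation.
 */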
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;
		gfp_t gfp;

		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		gfp = i915_gem_object_get_page_gfp_mask(obj);
		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
		ret = i915_gem_object_get_pages(obj);
		i915_gem_object_set_page_gfp_mask(obj, gfp);
	}

	return ret;
}

/**
 * This is the fallback shmem pread path, which pins the user pages with
 * get_user_pages up front, so that the copy out of the object's backing
 * pages can be done under struct_mutex without taking page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    1);
		} else {
			ret = slow_shmem_copy(user_pages[data_page_index],
					      data_page_offset,
					      obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      page_length);
		}

		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference(obj);

	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* This is the slow write path, used once the fast path has faulted. The
 * user pages have already been pinned, so the copy itself cannot fault.
 */
static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}
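
/* Atomically kmap a single backing page and copy user data into it.
 * Returns -EFAULT if the user source would fault, letting the caller
 * fall back to the slow path.
 */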
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
					gtt_page_base, gtt_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);

		/* A fault here should not happen, since the source pages
		 * were pinned above; if it does, return the error to the
		 * caller.
		 */
		if (ret)
			goto out_unpin_object;

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within the object's backing store
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    0);
		} else {
			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length);
		}

		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	mutex_lock(&dev->struct_mutex);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			list_move_tail(&obj_priv->fence_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}

		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	case -EFAULT:
	case -EINVAL:
		return VM_FAULT_SIGBUS;
	default:
		return VM_FAULT_NOPAGE;
	}
}
/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}

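/*
 * A sketch of the round trip (commentary, not driver code): the fake
 * offset handed out above is just the drm_mm node's start, in pages,
 * shifted into bytes.  When userspace later calls mmap(2) with it, the
 * DRM core hashes the page offset back to this map_list entry and wires
 * the VMA up to i915_gem_fault():
 *
 *	offset = node->start << PAGE_SHIFT;	  (this function)
 *	mmap(NULL, size, ..., drm_fd, offset);	  (userspace)
 *	drm_ht_find_item(&mm->offset_hash,
 *			 vma->vm_pgoff, &hash);	  (DRM core, roughly)
 */
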
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}

static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}

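/*
 * Worked example (commentary only): on a 9xx part, a tiled 3 MB object
 * starts at the 1 MB minimum fence size and doubles until the fence
 * covers the object: 1 MB < 3 MB, 2 MB < 3 MB, 4 MB >= 3 MB, so the
 * function returns a 4 MB alignment.  A tiled 512 KB object never enters
 * the loop and is aligned to the 1 MB minimum.
 */
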
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = obj->driver_private;

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

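/*
 * Illustrative only: the two-step GTT mapping as seen from userspace,
 * assuming an open DRM fd and a valid GEM handle.  The ioctl returns the
 * fake offset; mmap(2) on the DRM fd with that offset then lands in
 * drm_gem_mmap() and, on first access, in i915_gem_fault() above:
 *
 *	struct drm_i915_gem_mmap_gtt gtt_arg = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gtt_arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, gtt_arg.offset);
 */
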
void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	BUG_ON(obj_priv->pages_refcount == 0);

	if (--obj_priv->pages_refcount != 0)
		return;

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj_priv->madv == I915_MADV_DONTNEED)
		obj_priv->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj_priv->pages[i] == NULL)
			break;

		if (obj_priv->dirty)
			set_page_dirty(obj_priv->pages[i]);

		if (obj_priv->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj_priv->pages[i]);

		page_cache_release(obj_priv->pages[i]);
	}
	obj_priv->dirty = 0;

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
}

static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	spin_lock(&dev_priv->mm.active_list_lock);
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

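/*
 * Commentary: taken together, the three helpers above implement the
 * object lifecycle as a small state machine.  An object takes an extra
 * reference when it enters the active list and drops it on the way to
 * inactive:
 *
 *	active --> flushing	(request retired, write domain still dirty)
 *	active --> inactive	(request retired, no write domain)
 *	flushing --> active	(flush emitted by i915_add_request())
 */
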
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
		 uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = NULL;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	if (file_priv != NULL)
		i915_file_priv = file_priv->driver_priv;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);
	if (i915_file_priv) {
		list_add_tail(&request->client_list,
			      &i915_file_priv->mm.request_list);
	} else {
		INIT_LIST_HEAD(&request->client_list);
	}

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				uint32_t old_write_domain = obj->write_domain;

				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);

				trace_i915_gem_object_change_domain(obj,
								    obj->read_domains,
								    old_write_domain);
			}
		}
	}

	if (!dev_priv->mm.suspended) {
		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		if (was_empty)
			queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	}
	return seqno;
}

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_gem_request_retire(dev, request->seqno);

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	spin_lock(&dev_priv->mm.active_list_lock);
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this seqno.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			goto out;

#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else {
			/* Take a reference on the object so it won't be
			 * freed while the spinlock is held.  The list
			 * protection for this spinlock is safe when breaking
			 * the lock like this since the next thing we do
			 * is just get the head of the list again.
			 */
			drm_gem_object_reference(obj);
			i915_gem_object_move_to_inactive(obj);
			spin_unlock(&dev_priv->mm.active_list_lock);
			drm_gem_object_unreference(obj);
			spin_lock(&dev_priv->mm.active_list_lock);
		}
	}
out:
	spin_unlock(&dev_priv->mm.active_list_lock);
}

/**
 * Returns true if seq1 is later than seq2.
 */
bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

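/*
 * Commentary: the signed subtraction makes the comparison robust across
 * 32-bit seqno wraparound.  For example, with seq1 = 0x00000002 and
 * seq2 = 0xfffffffe, seq1 - seq2 == 4, which is positive as an int32_t,
 * so seq1 is correctly treated as later even though it is numerically
 * smaller.  This holds as long as the two values are within 2^31 of each
 * other.
 */
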
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!dev_priv->hw_status_page)
		return;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    atomic_read(&dev_priv->mm.wedged)) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			list_del(&request->client_list);
			kfree(request);
		} else
			break;
	}
}

void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (atomic_read(&dev_priv->mm.wedged))
		return -EIO;

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		if (IS_IGDNG(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

		trace_i915_gem_request_wait_begin(dev, seqno);

		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       atomic_read(&dev_priv->mm.wedged));
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;

		trace_i915_gem_request_wait_end(dev, seqno);
	}
	if (atomic_read(&dev_priv->mm.wedged))
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}

static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif
	trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
				     invalidate_domains, flush_domains);

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}

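/*
 * Worked example (commentary only): on a pre-965 part, invalidating
 * I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER builds the flush
 * command above as follows:
 *
 *	cmd  = MI_FLUSH | MI_NO_WRITE_FLUSH;
 *	cmd &= ~MI_NO_WRITE_FLUSH;	RENDER is in the domain mask
 *	cmd |= MI_READ_FLUSH;		SAMPLER invalidate, pre-965 only
 *
 * so a single MI_FLUSH both writes back the render cache and invalidates
 * the sampler cache.
 */
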
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret = 0;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* blow away mappings if mapped through GTT */
	i915_gem_release_mmap(obj);

	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	BUG_ON(obj_priv->active);

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	i915_gem_object_put_pages(obj);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	trace_i915_gem_object_unbind(obj);

	return 0;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
{
	return !obj_priv->dirty || obj_priv->madv == I915_MADV_DONTNEED;
}

static struct drm_gem_object *
i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *best = NULL;
	struct drm_gem_object *first = NULL;

	/* Try to find the smallest clean object */
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		struct drm_gem_object *obj = obj_priv->obj;
		if (obj->size >= min_size) {
			if (i915_gem_object_is_purgeable(obj_priv) &&
			    (!best || obj->size < best->size)) {
				best = obj;
				if (best->size == min_size)
					return best;
			}
			if (!first)
				first = obj;
		}
	}

	return best ? best : first;
}

static int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;
	int ret;
	bool lists_empty;

	DRM_INFO("GTT full, evicting everything: "
		 "%d objects [%d pinned], "
		 "%d object bytes [%d pinned], "
		 "%d/%d gtt bytes\n",
		 atomic_read(&dev->object_count),
		 atomic_read(&dev->pin_count),
		 atomic_read(&dev->object_memory),
		 atomic_read(&dev->pin_memory),
		 atomic_read(&dev->gtt_memory),
		 dev->gtt_total);

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty) {
		DRM_ERROR("GTT full, but lists empty!\n");
		return -ENOSPC;
	}

	/* Flush everything (on to the inactive lists) and evict */
	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
	if (seqno == 0)
		return -ENOMEM;

	ret = i915_wait_request(dev, seqno);
	if (ret)
		return ret;

	ret = i915_gem_evict_from_inactive_list(dev);
	if (ret)
		return ret;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);
	BUG_ON(!lists_empty);

	return 0;
}

static int
i915_gem_evict_something(struct drm_device *dev, int min_size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	int have_waited = 0;
	int ret;

	for (;;) {
		i915_gem_retire_requests(dev);

		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		obj = i915_gem_find_inactive_object(dev, min_size);
		if (obj) {
			struct drm_i915_gem_object *obj_priv;

#if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
			obj_priv = obj->driver_private;
			BUG_ON(obj_priv->pin_count != 0);
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			return i915_gem_object_unbind(obj);
		}

		if (have_waited)
			return 0;

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for the next to finish and hopefully leave us
		 * a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				return ret;

			have_waited = 1;
			continue;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			struct drm_i915_gem_object *obj_priv;
			uint32_t seqno;

			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			seqno = i915_add_request(dev, NULL, obj->write_domain);
			if (seqno == 0)
				return -ENOMEM;

			ret = i915_wait_request(dev, seqno);
			if (ret)
				return ret;

			have_waited = 1;
			continue;
		}

		/* If we didn't do any of the above, there's no single buffer
		 * large enough to swap out for the new one, so just evict
		 * everything and start again. (This should be rare.)
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			DRM_INFO("GTT full, evicting inactive buffers\n");
			return i915_gem_evict_from_inactive_list(dev);
		} else
			return i915_gem_evict_everything(dev);
	}
}

int
i915_gem_object_get_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->pages_refcount++ != 0)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->pages != NULL);
	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
	if (obj_priv->pages == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		obj_priv->pages_refcount--;
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			i915_gem_object_put_pages(obj);
			return ret;
		}
		obj_priv->pages[i] = page;
	}

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;
}

static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}

static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t fence_reg, val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
		     __func__, obj_priv->gtt_offset, obj->size);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (regnum < 8)
		fence_reg = FENCE_REG_830_0 + (regnum * 4);
	else
		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
	I915_WRITE(fence_reg, val);
}

static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;
	uint32_t fence_size_bits;

	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		return;
	}

	pitch_val = obj_priv->stride / 128;
	pitch_val = ffs(pitch_val) - 1;
	WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
	WARN_ON(fence_size_bits & ~0x00000f00);
	val |= fence_size_bits;
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}

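/*
 * Worked example (commentary only): for an X-tiled object with a 2048
 * byte stride on a chip without 128-byte Y tiling, i915_write_fence_reg()
 * above uses tile_width = 512, so pitch_val = 2048 / 512 = 4 tile widths,
 * encoded as ffs(4) - 1 = 2 in the register's pitch field.
 */
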
/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_i915_fence_reg *reg = NULL;
	struct drm_i915_gem_object *old_obj_priv = NULL;
	int i, ret, avail;

	/* Just update our place in the LRU if our fence is getting used. */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
		list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
		return 0;
	}

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	/* First try to find a free reg */
	avail = 0;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			break;

		old_obj_priv = reg->obj->driver_private;
		if (!old_obj_priv->pin_count)
			avail++;
	}

	/* None available, try to steal one or wait for a user to finish */
	if (i == dev_priv->num_fence_regs) {
		struct drm_gem_object *old_obj = NULL;

		if (avail == 0)
			return -ENOSPC;

		list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
				    fence_list) {
			old_obj = old_obj_priv->obj;

			if (old_obj_priv->pin_count)
				continue;

			/* Take a reference, as otherwise the wait_rendering
			 * below may cause the object to get freed out from
			 * under us.
			 */
			drm_gem_object_reference(old_obj);

			/* i915 uses fences for GPU access to tiled buffers */
			if (IS_I965G(dev) || !old_obj_priv->active)
				break;

			/* This brings the object to the head of the LRU if it
			 * had been written to.  The only way this should
			 * result in us waiting longer than the expected
			 * optimal amount of time is if there was a
			 * fence-using buffer later that was read-only.
			 */
			i915_gem_object_flush_gpu_write_domain(old_obj);
			ret = i915_gem_object_wait_rendering(old_obj);
			if (ret != 0) {
				drm_gem_object_unreference(old_obj);
				return ret;
			}

			break;
		}

		/*
		 * Zap this virtual mapping so we can set up a fence again
		 * for this object next time we need it.
		 */
		i915_gem_release_mmap(old_obj);

		i = old_obj_priv->fence_reg;
		reg = &dev_priv->fence_regs[i];

		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
		list_del_init(&old_obj_priv->fence_list);

		drm_gem_object_unreference(old_obj);
	}

	obj_priv->fence_reg = i;
	list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);

	reg->obj = obj;

	if (IS_I965G(dev))
		i965_write_fence_reg(reg);
	else if (IS_I9XX(dev))
		i915_write_fence_reg(reg);
	else
		i830_write_fence_reg(reg);

	trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);

	return 0;
}

/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (IS_I965G(dev))
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
	else {
		uint32_t fence_reg;

		if (obj_priv->fence_reg < 8)
			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
		else
			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
						       8) * 4;

		I915_WRITE(fence_reg, 0);
	}

	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	list_del_init(&obj_priv->fence_list);
}

/**
 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
 * to the buffer to finish, and then resets the fence register.
 * @obj: tiled object holding a fence register.
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	/* On the i915, GPU access to tiled buffers is via a fence,
	 * therefore we must wait for any outstanding access to complete
	 * before clearing the fence.
	 */
	if (!IS_I965G(dev)) {
		int ret;

		i915_gem_object_flush_gpu_write_domain(obj);
		i915_gem_object_flush_gtt_write_domain(obj);
		ret = i915_gem_object_wait_rendering(obj);
		if (ret != 0)
			return ret;
	}

	i915_gem_clear_fence_reg(obj);

	return 0;
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	bool retry_alloc = false;
	int ret;

	if (dev_priv->mm.suspended)
		return -EBUSY;

	if (obj_priv->madv == I915_MADV_DONTNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		ret = i915_gem_evict_something(dev, obj->size);
		if (ret != 0) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %zd at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	if (retry_alloc) {
		i915_gem_object_set_page_gfp_mask(obj,
						  i915_gem_object_get_page_gfp_mask(obj) & ~__GFP_NORETRY);
	}
	ret = i915_gem_object_get_pages(obj);
	if (retry_alloc) {
		i915_gem_object_set_page_gfp_mask(obj,
						  i915_gem_object_get_page_gfp_mask(obj) | __GFP_NORETRY);
	}
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		if (ret == -ENOMEM) {
			/* first try to clear up some space from the GTT */
			ret = i915_gem_evict_something(dev, obj->size);
			if (ret) {
				if (ret != -ERESTARTSYS)
					DRM_ERROR("Failed to allocate space for backing pages %d\n", ret);

				/* now try to shrink everyone else */
				if (!retry_alloc) {
					retry_alloc = true;
					goto search_free;
				}

				return ret;
			}

			goto search_free;
		}

		return ret;
	}

	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->pages,
					       obj->size >> PAGE_SHIFT,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_put_pages(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		ret = i915_gem_evict_something(dev, obj->size);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to allocate space to bind AGP: %d\n", ret);
			return ret;
		}

		goto search_free;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);

	return 0;
}

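/*
 * Commentary on the fallback ladder above: binding tries progressively
 * harder to find memory.  First it searches the GTT drm_mm for a free
 * block, evicting objects via i915_gem_evict_something() until one fits.
 * Then, if allocating backing pages fails with -ENOMEM, it evicts again;
 * if that also fails, it retries the whole search once more with
 * __GFP_NORETRY cleared so the page allocator itself can reclaim harder.
 */
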
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->pages == NULL)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t seqno;
	uint32_t old_write_domain;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	old_write_domain = obj->write_domain;
	i915_gem_flush(dev, 0, obj->write_domain);
	seqno = i915_add_request(dev, NULL, obj->write_domain);
	obj->write_domain = 0;
	i915_gem_object_move_to_active(obj, seqno);

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains &= I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate a
 * pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */

static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;
	uint32_t old_read_domains;

	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, obj->pending_read_domains,
		 obj->write_domain, obj->pending_write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |=
			obj->pending_read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		i915_gem_clflush_object(obj);
	}

	old_read_domains = obj->read_domains;

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->pending_write_domain == 0)
		obj->pending_write_domain = obj->write_domain;
	obj->read_domains = obj->pending_read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
#if WATCH_BUF
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
#endif

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);
}

/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	kfree(obj_priv->page_cpu_valid);
	obj_priv->page_cpu_valid = NULL;
}
/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t old_read_domains;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
						   GFP_KERNEL);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->pages + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	old_read_domains = obj->read_domains;
	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);

	return 0;
}
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry,
				 struct drm_i915_gem_relocation_entry *relocs)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc->target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc->offset,
			 (int) reloc->target_handle,
			 (int) reloc->read_domains,
			 (int) reloc->write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc->presumed_offset,
			 reloc->delta);
#endif

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by
		 * now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc->target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Validate that the target is in a valid r/w GPU domain */
		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->read_domains,
				  reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc->write_domain && target_obj->pending_write_domain &&
		    reloc->write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		target_obj->pending_read_domains |= reloc->read_domains;
		target_obj->pending_write_domain |= reloc->write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		/* Check that the relocation address is valid... */
		if (reloc->offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc->offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* and points to somewhere within the target object. */
		if (reloc->delta >= target_obj->size) {
			DRM_ERROR("Relocation beyond target object bounds: "
				  "obj %p target %d delta %d size %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->delta, (int) target_obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc->offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc->delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc->offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* The updated presumed offset for this entry will be
		 * copied back out to the user.
		 */
		reloc->presumed_offset = target_obj_priv->gtt_offset;

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}
/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     struct drm_clip_rect *cliprects,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&i915_file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);

		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ret = i915_wait_request(dev, request->seqno);
		if (ret != 0)
			break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
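
/* Copy the relocation entries for every buffer in the exec list from
 * userspace into one contiguous kernel allocation, guarding against
 * overflow when summing the per-buffer relocation counts.
 */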
static int
i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
			      uint32_t buffer_count,
			      struct drm_i915_gem_relocation_entry **relocs)
{
	uint32_t reloc_count = 0, reloc_index = 0, i;
	int ret;

	*relocs = NULL;
	for (i = 0; i < buffer_count; i++) {
		if (reloc_count + exec_list[i].relocation_count < reloc_count)
			return -EINVAL;
		reloc_count += exec_list[i].relocation_count;
	}

	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
	if (*relocs == NULL)
		return -ENOMEM;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		ret = copy_from_user(&(*relocs)[reloc_index],
				     user_relocs,
				     exec_list[i].relocation_count *
				     sizeof(**relocs));
		if (ret != 0) {
			drm_free_large(*relocs);
			*relocs = NULL;
			return -EFAULT;
		}

		reloc_index += exec_list[i].relocation_count;
	}

	return 0;
}
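
/* Write the updated relocation entries (with their new presumed
 * offsets) back out to each buffer's userspace relocation list, then
 * free the kernel-side copy.
 */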
static int
i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
			    uint32_t buffer_count,
			    struct drm_i915_gem_relocation_entry *relocs)
{
	uint32_t reloc_count = 0, i;
	int ret = 0;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		int unwritten;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		unwritten = copy_to_user(user_relocs,
					 &relocs[reloc_count],
					 exec_list[i].relocation_count *
					 sizeof(*relocs));

		if (unwritten) {
			ret = -EFAULT;
			goto err;
		}

		reloc_count += exec_list[i].relocation_count;
	}

err:
	drm_free_large(relocs);

	return ret;
}
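
/* Sanity check that the batch start and length are 8-byte aligned and
 * that the batch start is non-zero before pointing the hardware at it.
 */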
static int
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
			  uint64_t exec_offset)
{
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7)
		return -EINVAL;

	if (!exec_start)
		return -EINVAL;

	return 0;
}
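
/* Execbuffer ioctl: copy in the exec and relocation lists from
 * userspace, pin and relocate every buffer, compute the combined
 * domain transitions (flushing and invalidating caches as needed),
 * dispatch the batch buffer to the ring, and move the objects onto
 * the active list under a new request seqno.
 */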
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_relocation_entry *relocs;
	int ret, ret2, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains, reloc_index;
	int pin_tries;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	if (args->num_cliprects != 0) {
		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		ret = copy_from_user(cliprects,
				     (struct drm_clip_rect __user *)
				     (uintptr_t) args->cliprects_ptr,
				     sizeof(*cliprects) * args->num_cliprects);
		if (ret != 0) {
			DRM_ERROR("copy %d cliprects failed: %d\n",
				  args->num_cliprects, ret);
			goto pre_mutex_err;
		}
	}

	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
					    &relocs);
	if (ret != 0)
		goto pre_mutex_err;

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}

		obj_priv = object_list[i]->driver_private;
		if (obj_priv->in_execbuffer) {
			DRM_ERROR("Object %p appears more than once in object list\n",
				  object_list[i]);
			ret = -EBADF;
			goto err;
		}
		obj_priv->in_execbuffer = true;
	}

	/* Pin and relocate */
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		reloc_index = 0;

		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i],
							       &relocs[reloc_index]);
			if (ret)
				break;
			pinned = i + 1;
			reloc_index += exec_list[i].relocation_count;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOSPC || pin_tries >= 1) {
			if (ret != -ERESTARTSYS) {
				unsigned long long total_size = 0;
				for (i = 0; i < args->buffer_count; i++)
					total_size += object_list[i]->size;
				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
					  pinned + 1, args->buffer_count,
					  total_size, ret);
				DRM_ERROR("%d objects [%d pinned], "
					  "%d object bytes [%d pinned], "
					  "%d/%d gtt bytes\n",
					  atomic_read(&dev->object_count),
					  atomic_read(&dev->pin_count),
					  atomic_read(&dev->object_memory),
					  atomic_read(&dev->pin_memory),
					  atomic_read(&dev->gtt_memory),
					  dev->gtt_total);
			}
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);
		pinned = 0;

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret && ret != -ENOSPC)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count - 1];
	if (batch_obj->pending_write_domain) {
		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* Sanity check the batch buffer, prior to moving objects */
	exec_offset = exec_list[args->buffer_count - 1].offset;
	ret = i915_gem_check_execbuffer(args, exec_offset);
	if (ret != 0) {
		DRM_ERROR("execbuf with invalid offset/length\n");
		goto err;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, file_priv,
					       dev->flush_domains);
	}

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		uint32_t old_write_domain = obj->write_domain;

		obj->write_domain = obj->pending_write_domain;
		trace_i915_gem_object_change_domain(obj,
						    obj->read_domains,
						    old_write_domain);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

#if WATCH_EXEC
	i915_gem_dump_object(batch_obj,
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, file_priv, flush_domains);
	BUG_ON(seqno == 0);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

err:
	for (i = 0; i < pinned; i++)
		i915_gem_object_unpin(object_list[i]);

	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]) {
			obj_priv = object_list[i]->driver_private;
			obj_priv->in_execbuffer = false;
		}
		drm_gem_object_unreference(object_list[i]);
	}

	mutex_unlock(&dev->struct_mutex);

	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	/* Copy the updated relocations out regardless of current error
	 * state.  Failure to update the relocs would mean that the next
	 * time userland calls execbuf, it would do so with presumed offset
	 * state that didn't match the actual object state.
	 */
	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
					   relocs);
	if (ret2 != 0) {
		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);

		if (ret == 0)
			ret = ret2;
	}

pre_mutex_err:
	drm_free_large(object_list);
	drm_free_large(exec_list);
	kfree(cliprects);

	return ret;
}
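
/* Pin an object into the GTT, binding it (and, on pre-965 chips with
 * tiled surfaces, setting up a fence register) if necessary.  While
 * pinned, an object is kept off the inactive list and cannot be
 * evicted.
 */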
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			if (ret != -EBUSY && ret != -ERESTARTSYS)
				DRM_ERROR("Failure to bind: %d\n", ret);
			return ret;
		}
	}
	/*
	 * Pre-965 chips need a fence register set up in order to
	 * properly handle tiled surfaces.
	 */
	if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret != 0) {
			if (ret != -EBUSY && ret != -ERESTARTSYS)
				DRM_ERROR("Failure to install fence: %d\n",
					  ret);
			return ret;
		}
	}
	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
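
/* Drop one pin reference.  When the last reference goes away, the
 * object returns to the inactive list unless it is still active or
 * awaiting a GPU flush.
 */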
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
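
/* Pin ioctl: pin a buffer on behalf of userspace and report its GTT
 * offset.  pin_filp records which client owns the pin so one client
 * cannot unpin another client's buffer.
 */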
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	if (obj_priv->madv == I915_MADV_DONTNEED) {
		DRM_ERROR("Attempting to pin an I915_MADV_DONTNEED buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
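
/* Busy ioctl: report whether the object is still in use by the GPU. */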
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	/* Update the active list for the hardware's current position.
	 * Otherwise this only updates on a delayed timer or when irqs are
	 * actually unmasked, and our working set ends up being larger than
	 * required.
	 */
	i915_gem_retire_requests(dev);

	obj_priv = obj->driver_private;
	/* Don't count being on the flushing list against the object being
	 * done.  Otherwise, a buffer left on the flushing list but not getting
	 * flushed (because nobody's flushing that domain) won't ever return
	 * unbusy and get reused by libdrm's bo cache.  The other expected
	 * consumer of this interface, OpenGL's occlusion queries, also specs
	 * that the objects get unbusy "eventually" without any interference.
	 */
	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
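
/* Madvise ioctl: mark a buffer as needed or unneeded, allowing the
 * shrinker to discard the backing storage of unneeded buffers when
 * memory is tight.
 */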
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
			  args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	obj_priv = obj->driver_private;

	if (obj_priv->pin_count) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);

		DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
		return -EINVAL;
	}

	obj_priv->madv = args->madv;
	args->retained = obj_priv->gtt_space != NULL;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel,
	 * so they've just been written by the CPU with
	 * zeros. They'll need to be clflushed before we
	 * use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj_priv->list);
	INIT_LIST_HEAD(&obj_priv->fence_list);
	obj_priv->madv = I915_MADV_WILLNEED;

	trace_i915_gem_object_create(obj);

	return 0;
}
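
/* Tear down an object: drop any remaining pins, detach it from a phys
 * object, unbind it from the GTT and free the driver-private state.
 */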
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	trace_i915_gem_object_destroy(obj);

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_object_unbind(obj);

	if (obj_priv->mmap_offset)
		i915_gem_free_mmap_offset(obj);

	kfree(obj_priv->page_cpu_valid);
	kfree(obj_priv->bit_17);
	kfree(obj->driver_private);
}

/** Unbinds all inactive objects. */
static int
i915_gem_evict_from_inactive_list(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	while (!list_empty(&dev_priv->mm.inactive_list)) {
		struct drm_gem_object *obj;
		int ret;

		obj = list_first_entry(&dev_priv->mm.inactive_list,
				       struct drm_i915_gem_object,
				       list)->obj;

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
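
/* Quiesce the GPU: flush all outstanding rendering, wait for it to
 * complete (wedging the hardware if it gets stuck), retire the
 * requests, and empty the active and flushing lists so that the GTT
 * can be torn down.
 */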
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;
	del_timer(&dev_priv->hangcheck_timer);

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				atomic_set(&dev_priv->mm.wedged, 1);
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	spin_lock(&dev_priv->mm.active_list_lock);
	if (!atomic_read(&dev_priv->mm.wedged)) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive.  If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there.  So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		uint32_t old_write_domain;

		obj = list_first_entry(&dev_priv->mm.active_list,
				       struct drm_i915_gem_object,
				       list)->obj;
		old_write_domain = obj->write_domain;
		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj);

		trace_i915_gem_object_change_domain(obj,
						    obj->read_domains,
						    old_write_domain);
	}
	spin_unlock(&dev_priv->mm.active_list_lock);

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_gem_object *obj;
		uint32_t old_write_domain;

		obj = list_first_entry(&dev_priv->mm.flushing_list,
				       struct drm_i915_gem_object,
				       list)->obj;
		old_write_domain = obj->write_domain;
		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj);

		trace_i915_gem_object_change_domain(obj,
						    obj->read_domains,
						    old_write_domain);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_inactive_list(dev);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
static void
i915_gem_cleanup_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	if (dev_priv->hws_obj == NULL)
		return;

	obj = dev_priv->hws_obj;
	obj_priv = obj->driver_private;

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->hws_obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
	dev_priv->hw_status_page = NULL;

	/* Write high address into HWS_PGA when disabling. */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
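
/* Allocate, pin and map the ring buffer object, then program the ring
 * registers, working around G45's failure to reset the head pointer.
 */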
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	drm_i915_ring_buffer_t *ring = &dev_priv->ring;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		i915_gem_cleanup_hws(dev);
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	ring->Size = obj->size;

	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.size = obj->size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return -EINVAL;
	}
	ring->ring_obj = obj;
	ring->virtual_start = ring->map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
	}

	return 0;
}
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	i915_gem_cleanup_hws(dev);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	spin_lock(&dev_priv->mm.active_list_lock);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	return ret;
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
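
/* One-time GEM setup at driver load: initialize the memory-manager
 * lists and retire work handler, register with the shrinker list, and
 * clear all fence registers.
 */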
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock_init(&dev_priv->mm.active_list_lock);
	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	spin_lock(&shrink_list_lock);
	list_add(&dev_priv->mm.shrink_list, &shrink_list);
	spin_unlock(&shrink_list_lock);

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	if (IS_I965G(dev)) {
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
	} else {
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
	}

	i915_gem_detect_bit_6_swizzle(dev);
}
  3916. /*
  3917. * Create a physically contiguous memory object for this object
  3918. * e.g. for cursor + overlay regs
  3919. */
  3920. int i915_gem_init_phys_object(struct drm_device *dev,
  3921. int id, int size)
  3922. {
  3923. drm_i915_private_t *dev_priv = dev->dev_private;
  3924. struct drm_i915_gem_phys_object *phys_obj;
  3925. int ret;
  3926. if (dev_priv->mm.phys_objs[id - 1] || !size)
  3927. return 0;
  3928. phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
  3929. if (!phys_obj)
  3930. return -ENOMEM;
  3931. phys_obj->id = id;
  3932. phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
  3933. if (!phys_obj->handle) {
  3934. ret = -ENOMEM;
  3935. goto kfree_obj;
  3936. }
  3937. #ifdef CONFIG_X86
  3938. set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  3939. #endif
  3940. dev_priv->mm.phys_objs[id - 1] = phys_obj;
  3941. return 0;
  3942. kfree_obj:
  3943. kfree(phys_obj);
  3944. return ret;
  3945. }
  3946. void i915_gem_free_phys_object(struct drm_device *dev, int id)
  3947. {
  3948. drm_i915_private_t *dev_priv = dev->dev_private;
  3949. struct drm_i915_gem_phys_object *phys_obj;
  3950. if (!dev_priv->mm.phys_objs[id - 1])
  3951. return;
  3952. phys_obj = dev_priv->mm.phys_objs[id - 1];
  3953. if (phys_obj->cur_obj) {
  3954. i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
  3955. }
  3956. #ifdef CONFIG_X86
  3957. set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  3958. #endif
  3959. drm_pci_free(dev, phys_obj->handle);
  3960. kfree(phys_obj);
  3961. dev_priv->mm.phys_objs[id - 1] = NULL;
  3962. }
  3963. void i915_gem_free_all_phys_object(struct drm_device *dev)
  3964. {
  3965. int i;
  3966. for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
  3967. i915_gem_free_phys_object(dev, i);
  3968. }
  3969. void i915_gem_detach_phys_object(struct drm_device *dev,
  3970. struct drm_gem_object *obj)
  3971. {
  3972. struct drm_i915_gem_object *obj_priv;
  3973. int i;
  3974. int ret;
  3975. int page_count;
  3976. obj_priv = obj->driver_private;
  3977. if (!obj_priv->phys_obj)
  3978. return;
  3979. ret = i915_gem_object_get_pages(obj);
  3980. if (ret)
  3981. goto out;
  3982. page_count = obj->size / PAGE_SIZE;
  3983. for (i = 0; i < page_count; i++) {
  3984. char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
  3985. char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  3986. memcpy(dst, src, PAGE_SIZE);
  3987. kunmap_atomic(dst, KM_USER0);
  3988. }
  3989. drm_clflush_pages(obj_priv->pages, page_count);
  3990. drm_agp_chipset_flush(dev);
  3991. i915_gem_object_put_pages(obj);
  3992. out:
  3993. obj_priv->phys_obj->cur_obj = NULL;
  3994. obj_priv->phys_obj = NULL;
  3995. }
  3996. int
  3997. i915_gem_attach_phys_object(struct drm_device *dev,
  3998. struct drm_gem_object *obj, int id)
  3999. {
  4000. drm_i915_private_t *dev_priv = dev->dev_private;
  4001. struct drm_i915_gem_object *obj_priv;
  4002. int ret = 0;
  4003. int page_count;
  4004. int i;
  4005. if (id > I915_MAX_PHYS_OBJECT)
  4006. return -EINVAL;
  4007. obj_priv = obj->driver_private;
  4008. if (obj_priv->phys_obj) {
  4009. if (obj_priv->phys_obj->id == id)
  4010. return 0;
  4011. i915_gem_detach_phys_object(dev, obj);
  4012. }
  4013. /* create a new object */
  4014. if (!dev_priv->mm.phys_objs[id - 1]) {
  4015. ret = i915_gem_init_phys_object(dev, id,
  4016. obj->size);
  4017. if (ret) {
  4018. DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
  4019. goto out;
  4020. }
  4021. }
  4022. /* bind to the object */
  4023. obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
  4024. obj_priv->phys_obj->cur_obj = obj;
  4025. ret = i915_gem_object_get_pages(obj);
  4026. if (ret) {
  4027. DRM_ERROR("failed to get page list\n");
  4028. goto out;
  4029. }
  4030. page_count = obj->size / PAGE_SIZE;
  4031. for (i = 0; i < page_count; i++) {
  4032. char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
  4033. char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  4034. memcpy(dst, src, PAGE_SIZE);
  4035. kunmap_atomic(src, KM_USER0);
  4036. }
  4037. i915_gem_object_put_pages(obj);
  4038. return 0;
  4039. out:
  4040. return ret;
  4041. }
  4042. static int
  4043. i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
  4044. struct drm_i915_gem_pwrite *args,
  4045. struct drm_file *file_priv)
  4046. {
  4047. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  4048. void *obj_addr;
  4049. int ret;
  4050. char __user *user_data;
  4051. user_data = (char __user *) (uintptr_t) args->data_ptr;
  4052. obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
  4053. DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
  4054. ret = copy_from_user(obj_addr, user_data, args->size);
  4055. if (ret)
  4056. return -EFAULT;
  4057. drm_agp_chipset_flush(dev);
  4058. return 0;
  4059. }
void i915_gem_release(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list))
		list_del_init(i915_file_priv->mm.request_list.next);
	mutex_unlock(&dev->struct_mutex);
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
	struct inode *inode;

	inode = obj->filp->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	truncate_inode_pages(inode->i_mapping, 0);
	mutex_unlock(&inode->i_mutex);
}

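/*
 * Memory shrinker callback (old-style shrink_slab interface).  With
 * nr_to_scan == 0 it only counts objects on the inactive lists and
 * reports that count scaled by sysctl_vfs_cache_pressure.  Otherwise it
 * makes two passes over each device's inactive list: first unbinding
 * and truncating purgeable buffers, then unbinding anything that
 * remains.  It returns -1 when it could not take any device's
 * struct_mutex in the eviction pass, signalling a potential deadlock.
 */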
static int
i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				struct drm_gem_object *obj = obj_priv->obj;
				i915_gem_object_unbind(obj);
				i915_gem_object_truncate(obj);

				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (nr_to_scan > 0) {
				struct drm_gem_object *obj = obj_priv->obj;
				i915_gem_object_unbind(obj);
				if (i915_gem_object_is_purgeable(obj_priv))
					i915_gem_object_truncate(obj);

				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;
	else if (cnt > 0)
		return (cnt / 100) * sysctl_vfs_cache_pressure;
	else
		return 0;
}

static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};

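/*
 * Register/unregister the global shrinker with the VM; the __init and
 * __exit sections imply these run once at module load and unload.
 */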
__init void
i915_gem_shrinker_init(void)
{
	register_shrinker(&shrinker);
}

__exit void
i915_gem_shrinker_exit(void)
{
	unregister_shrinker(&shrinker);
}