i915_gem.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_init_global_gtt(dev, args->gtt_start,
				 args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
		if (obj->pin_count)
			pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
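
/*
 * Worked example (illustrative, not part of the driver): a 1366x768 dumb
 * buffer at 32 bpp has a raw row of 1366 * 4 = 5464 bytes; ALIGN(5464, 64)
 * rounds the pitch up to 5504 bytes, giving
 * args->size = 5504 * 768 = 4227072 bytes.
 */
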
int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
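
/*
 * Illustrative userspace sketch (assumes an open DRM fd and the i915 UAPI
 * header; not part of the driver itself):
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_buffer(create.handle);
 *
 * where use_buffer() is a hypothetical helper; i915_gem_create() above
 * rounds .size up to a whole page before allocating the object.
 */
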
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
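
/*
 * Illustrative walk-through (not part of the driver): XORing the offset
 * with 64 swaps the two 64-byte halves of every 128-byte block, which is
 * exactly what undoing bit-17 swizzling requires. Copying 100 bytes from
 * gpu_offset 16 therefore proceeds as:
 *
 *	chunk 1: gpu_offset 16, cacheline_end 64, this_length 48, reads at 80
 *	chunk 2: gpu_offset 64, cacheline_end 128, this_length 52, reads at 0
 *
 * Clamping every chunk to a 64-byte cacheline boundary keeps each copy
 * contiguous after the swizzle.
 */
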
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
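
/*
 * Illustrative example (not part of the driver): flushing the swizzled
 * range [0x70, 0x90) rounds out to [0x0, 0x100), so both 64-byte halves
 * of each 128-byte block are flushed even though the caller only named
 * bytes from one swizzle channel.
 */
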
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int prefaulted = 0;
	int needs_clflush = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourselves into the
		 * gtt read domain and manually flush cachelines (if required).
		 * This optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret)
			return ret;
	}

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		if (!prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
	}

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_object_pin(obj, 0, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourselves into the
		 * gtt write domain and manually flush cachelines (if required).
		 * This optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;
	}

	/* The same trick applies to invalidating partially written
	 * cachelines before writing. */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
	    && obj->cache_level == I915_CACHE_NONE)
		needs_clflush_before = 1;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		int partial_cacheline_write;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
			 & (boot_cpu_data.x86_clflush_size - 1));
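
		/* Worked example (illustrative): with a 64-byte clflush
		 * line, a page-aligned write of 0x1000 bytes at offset 0
		 * gives ((0 | 0x1000) & 63) == 0, so no pre-flush is needed,
		 * while a write at offset 0x10 leaves low bits set and
		 * takes the flush-before path. */
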
		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		set_page_dirty(page);
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
		/* and flush dirty cachelines in case the object isn't in the
		 * cpu write domain anymore. */
		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			intel_gtt_chipset_flush();
		}
	}

	if (needs_clflush_after)
		intel_gtt_chipset_flush();

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
					   args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->gtt_space &&
	    obj->cache_level == I915_CACHE_NONE &&
	    obj->tiling_mode == I915_TILING_NONE &&
	    obj->map_and_fenceable &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fall back to the shmem path in that case. */
	}

	if (ret == -EFAULT)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
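
/*
 * Illustrative userspace sketch (assumes an open DRM fd and a valid GEM
 * handle; not part of the driver): this ioctl maps the shmem backing store
 * directly, with no GTT involvement:
 *
 *	struct drm_i915_gem_mmap arg = { .handle = handle, .size = 4096 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		memcpy((void *)(uintptr_t)arg.addr_ptr, data, 4096);
 */
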
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Now bind it into the GTT if needed */
	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	if (!obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
		if (!atomic_read(&dev_priv->mm.wedged))
			return VM_FAULT_SIGBUS;
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then the pages mapped into userspace must be revoked. Removing
 * the mapping will then trigger a page fault on the next user access,
 * allowing fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	if (obj->base.dev->dev_mapping)
		unmap_mapping_range(obj->base.dev->dev_mapping,
				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
				    obj->base.size, 1);

	obj->fault_mappable = false;
}

static uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}
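
/*
 * Illustrative example (not part of the driver): on gen3, a 1.5 MiB tiled
 * object starts from the 1 MiB minimum and doubles once, so it occupies a
 * 2 MiB fence region; i915_gem_get_gtt_alignment() below then demands the
 * matching 2 MiB alignment for the fenced case.
 */
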
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @dev: the device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev,
			   uint32_t size,
			   int tiling_mode)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

/**
 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
 *					 unfenced object
 * @dev: the device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 *
 * Return the required GTT alignment for an object, only taking into account
 * unfenced tiled surface requirements.
 */
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
				    uint32_t size,
				    int tiling_mode)
{
	/*
	 * Minimum alignment is 4k (GTT page size) for sane hw.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/* Previous hardware however needs to be aligned to a power-of-two
	 * tile height. The simplest method for determining this is to reuse
	 * the power-of-two fence size computed by i915_gem_get_gtt_size().
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (!obj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(&obj->base);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
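
/*
 * Editorial sketch of the userspace side (illustrative only; error
 * handling omitted, drmIoctl() from libdrm assumed).  The offset
 * returned by the ioctl is a fake token, only meaningful as the mmap
 * offset on the DRM fd:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * The first access through ptr faults into the driver, which binds the
 * object into the GTT and points the PTEs at the aperture.
 */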
int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
			      gfp_t gfpmask)
{
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;

	if (obj->pages || obj->sg_table)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->base.size / PAGE_SIZE;
	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(page))
			goto err_pages;

		obj->pages[i] = page;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int i;

	if (!obj->pages)
		return;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj->dirty)
			set_page_dirty(obj->pages[i]);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj->pages[i]);

		page_cache_release(obj->pages[i]);
	}
	obj->dirty = 0;

	drm_free_large(obj->pages);
	obj->pages = NULL;
}

void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring,
			       u32 seqno)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(ring == NULL);
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_rendering_seqno = seqno;

	if (obj->fenced_gpu_access) {
		obj->last_fenced_seqno = seqno;

		/* Bump MRU to take account of the delayed flush */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg;

			reg = &dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}
	}
}

static void
i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
	list_del_init(&obj->ring_list);
	obj->last_rendering_seqno = 0;
	obj->last_fenced_seqno = 0;
}

static void
i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	BUG_ON(!obj->active);
	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);

	i915_gem_object_move_off_active(obj);
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	BUG_ON(!list_empty(&obj->gpu_write_list));
	BUG_ON(!obj->active);
	obj->ring = NULL;

	i915_gem_object_move_off_active(obj);
	obj->fenced_gpu_access = false;

	obj->active = 0;
	obj->pending_gpu_write = false;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}
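
/*
 * Editorial summary of the list transitions implemented above (active,
 * flushing and inactive are the three mm lists an object moves through):
 *
 *	active   --retired, write domain still dirty--> flushing
 *	active   --retired, no pending write----------> inactive
 *	flushing --flush emitted (new seqno)----------> active
 *
 * Entering the active list takes an extra object reference; it is
 * dropped again in i915_gem_object_move_to_inactive().
 */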
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	struct inode *inode;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	inode = obj->base.filp->f_path.dentry->d_inode;
	shmem_truncate_range(inode, 0, (loff_t)-1);

	if (obj->base.map_list.map)
		drm_gem_free_mmap_offset(&obj->base);

	obj->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}

static void
i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
			       uint32_t flush_domains)
{
	struct drm_i915_gem_object *obj, *next;

	list_for_each_entry_safe(obj, next,
				 &ring->gpu_write_list,
				 gpu_write_list) {
		if (obj->base.write_domain & flush_domains) {
			uint32_t old_write_domain = obj->base.write_domain;

			obj->base.write_domain = 0;
			list_del_init(&obj->gpu_write_list);
			i915_gem_object_move_to_active(obj, ring,
				       i915_gem_next_request_seqno(ring));

			trace_i915_gem_object_change_domain(obj,
							    obj->base.read_domains,
							    old_write_domain);
		}
	}
}

static u32
i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

u32
i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
{
	if (ring->outstanding_lazy_request == 0)
		ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);

	return ring->outstanding_lazy_request;
}
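
/*
 * Editorial note: seqno 0 is reserved above so that "no request" is
 * never confused with a real sequence number.  Ordering tests in this
 * file go through i915_seqno_passed(), defined elsewhere in the driver;
 * a sketch of its usual form (an assumption, shown only for reference):
 *
 *	static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 *	{
 *		return (s32)(seq1 - seq2) >= 0;
 *	}
 *
 * The signed subtraction makes the comparison robust across u32
 * wrap-around, e.g. seqno 1 counts as "after" 0xffffffff.
 */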
int
i915_add_request(struct intel_ring_buffer *ring,
		 struct drm_file *file,
		 struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	uint32_t seqno;
	u32 request_ring_position;
	int was_empty;
	int ret;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (ring->gpu_caches_dirty) {
		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;

		ring->gpu_caches_dirty = false;
	}

	BUG_ON(request == NULL);
	seqno = i915_gem_next_request_seqno(ring);

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request_ring_position = intel_ring_get_tail(ring);

	ret = ring->add_request(ring, &seqno);
	if (ret)
		return ret;

	trace_i915_gem_request_add(ring, seqno);

	request->seqno = seqno;
	request->ring = ring;
	request->tail = request_ring_position;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);

	if (file) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	ring->outstanding_lazy_request = 0;

	if (!dev_priv->mm.suspended) {
		if (i915_enable_hangcheck) {
			mod_timer(&dev_priv->hangcheck_timer,
				  jiffies +
				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
		}
		if (was_empty)
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work, HZ);
	}

	WARN_ON(!list_empty(&ring->gpu_write_list));

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				      struct intel_ring_buffer *ring)
{
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		obj->base.write_domain = 0;
		list_del_init(&obj->gpu_write_list);
		i915_gem_object_move_to_inactive(obj);
	}
}

static void i915_gem_reset_fences(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		i915_gem_write_fence(dev, i, NULL);

		if (reg->obj)
			i915_gem_object_fence_lost(reg->obj);

		reg->pin_count = 0;
		reg->obj = NULL;
		INIT_LIST_HEAD(&reg->lru_list);
	}

	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
}

void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_lists(dev_priv, ring);

	/* Remove anything from the flushing lists. The GPU cache is likely
	 * to be lost on reset along with the data, so simply move the
	 * lost bo to the inactive list.
	 */
	while (!list_empty(&dev_priv->mm.flushing_list)) {
		obj = list_first_entry(&dev_priv->mm.flushing_list,
				       struct drm_i915_gem_object,
				       mm_list);

		obj->base.write_domain = 0;
		list_del_init(&obj->gpu_write_list);
		i915_gem_object_move_to_inactive(obj);
	}

	/* Move everything out of the GPU domains to ensure we do any
	 * necessary invalidation upon reuse.
	 */
	list_for_each_entry(obj,
			    &dev_priv->mm.inactive_list,
			    mm_list)
	{
		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	}

	/* The fence registers are invalidated so clear them out */
	i915_gem_reset_fences(dev);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
	uint32_t seqno;
	int i;

	if (list_empty(&ring->request_list))
		return;

	WARN_ON(i915_verify_lists(ring->dev));

	seqno = ring->get_seqno(ring);

	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
		if (seqno >= ring->sync_seqno[i])
			ring->sync_seqno[i] = 0;

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_seqno_passed(seqno, request->seqno))
			break;

		trace_i915_gem_request_retire(ring, request->seqno);
		/* We know the GPU must have read the request to have
		 * sent us the seqno + interrupt, so use the position
		 * of tail of the request to update the last known position
		 * of the GPU head.
		 */
		ring->last_retired_head = request->tail;

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
			break;

		if (obj->base.write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else
			i915_gem_object_move_to_inactive(obj);
	}

	if (unlikely(ring->trace_irq_seqno &&
		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
		ring->irq_put(ring);
		ring->trace_irq_seqno = 0;
	}

	WARN_ON(i915_verify_lists(ring->dev));
}

void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_retire_requests_ring(ring);
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;
	struct intel_ring_buffer *ring;
	bool idle;
	int i;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	/* Come back later if the device is busy... */
	if (!mutex_trylock(&dev->struct_mutex)) {
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
		return;
	}

	i915_gem_retire_requests(dev);

	/* Send a periodic flush down the ring so we don't hold onto GEM
	 * objects indefinitely.
	 */
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		if (ring->gpu_caches_dirty) {
			struct drm_i915_gem_request *request;

			request = kzalloc(sizeof(*request), GFP_KERNEL);
			if (request == NULL ||
			    i915_add_request(ring, NULL, request))
				kfree(request);
		}

		idle &= list_empty(&ring->request_list);
	}

	if (!dev_priv->mm.suspended && !idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);

	mutex_unlock(&dev->struct_mutex);
}

int
i915_gem_check_wedge(struct drm_i915_private *dev_priv,
		     bool interruptible)
{
	if (atomic_read(&dev_priv->mm.wedged)) {
		struct completion *x = &dev_priv->error_completion;
		bool recovery_complete;
		unsigned long flags;

		/* Give the error handler a chance to run. */
		spin_lock_irqsave(&x->wait.lock, flags);
		recovery_complete = x->done > 0;
		spin_unlock_irqrestore(&x->wait.lock, flags);

		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but still wedged means reset failure. */
		if (recovery_complete)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	if (seqno == ring->outstanding_lazy_request) {
		struct drm_i915_gem_request *request;

		request = kzalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ret = i915_add_request(ring, NULL, request);
		if (ret) {
			kfree(request);
			return ret;
		}

		BUG_ON(seqno != request->seqno);
	}

	return ret;
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: the sequence number to wait for
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with the remaining time filled in the timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			bool interruptible, struct timespec *timeout)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now, wait_time={1,0};
	unsigned long timeout_jiffies;
	long end;
	bool wait_forever = true;
	int ret;

	if (i915_seqno_passed(ring->get_seqno(ring), seqno))
		return 0;

	trace_i915_gem_request_wait_begin(ring, seqno);

	if (timeout != NULL) {
		wait_time = *timeout;
		wait_forever = false;
	}

	timeout_jiffies = timespec_to_jiffies(&wait_time);

	if (WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case we're interrupted by a signal, or wedged */
	getrawmonotonic(&before);

#define EXIT_COND \
	(i915_seqno_passed(ring->get_seqno(ring), seqno) || \
	atomic_read(&dev_priv->mm.wedged))
	do {
		if (interruptible)
			end = wait_event_interruptible_timeout(ring->irq_queue,
							       EXIT_COND,
							       timeout_jiffies);
		else
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);

		ret = i915_gem_check_wedge(dev_priv, interruptible);
		if (ret)
			end = ret;
	} while (end == 0 && wait_forever);

	getrawmonotonic(&now);

	ring->irq_put(ring);
	trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
	}

	switch (end) {
	case -EIO:
	case -EAGAIN: /* Wedged */
	case -ERESTARTSYS: /* Signal */
		return (int)end;
	case 0: /* Timeout */
		if (timeout)
			set_normalized_timespec(timeout, 0, 0);
		return -ETIME;
	default: /* Completed */
		WARN_ON(end < 0); /* We're not aware of other errors */
		return 0;
	}
}
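
/*
 * Editorial sketch of the timeout contract (illustrative): @timeout is
 * both an input budget and an output of the time left, so a caller can
 * split one deadline across several waits:
 *
 *	struct timespec ts = ns_to_timespec(budget_ns);
 *	ret = __wait_seqno(ring, seqno, true, &ts);
 *	// ret == 0:      seqno completed, ts holds the unused budget
 *	// ret == -ETIME: budget exhausted, ts has been zeroed
 */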
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);

	return ret;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
{
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj->active) {
		ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
		if (ret)
			return ret;
		i915_gem_retire_requests_ring(obj->ring);
	}

	return 0;
}

/**
 * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring any
 * completed requests.
 */
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->active) {
		ret = i915_gem_object_flush_gpu_write_domain(obj);
		if (ret)
			return ret;

		ret = i915_gem_check_olr(obj->ring,
					 obj->last_rendering_seqno);
		if (ret)
			return ret;
		i915_gem_retire_requests_ring(obj->ring);
	}

	return 0;
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the
 * busy ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_ring_buffer *ring = NULL;
	struct timespec timeout_stack, *timeout = NULL;
	u32 seqno = 0;
	int ret = 0;

	if (args->timeout_ns >= 0) {
		timeout_stack = ns_to_timespec(args->timeout_ns);
		timeout = &timeout_stack;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
	if (&obj->base == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	/* Need to make sure the object gets inactive eventually. */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto out;

	if (obj->active) {
		seqno = obj->last_rendering_seqno;
		ring = obj->ring;
	}

	if (seqno == 0)
		goto out;

	/* Do this after the OLR check to make sure we make forward progress
	 * polling on this IOCTL with a 0 timeout (like the busy ioctl).
	 */
	if (!args->timeout_ns) {
		ret = -ETIME;
		goto out;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	ret = __wait_seqno(ring, seqno, true, timeout);
	if (timeout) {
		WARN_ON(!timespec_valid(timeout));
		args->timeout_ns = timespec_to_ns(timeout);
	}
	return ret;

out:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
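
/*
 * Editorial sketch of the matching userspace call (illustrative; field
 * names follow the drm_i915_gem_wait uapi used above):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle  = handle,
 *		.timeout_ns = 500 * 1000 * 1000,	// 500ms budget
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * On -ETIME the object is still busy and wait.timeout_ns has been
 * rewritten with the remaining time (zero here), mirroring the in/out
 * semantics of __wait_seqno().
 */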
/**
 * i915_gem_object_sync - sync an object to a ring.
 *
 * @obj: object which may be in use on another ring.
 * @to: ring we wish to use the object on. May be NULL.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Calling with NULL implies synchronizing the object with the CPU
 * rather than a particular GPU ring.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct intel_ring_buffer *to)
{
	struct intel_ring_buffer *from = obj->ring;
	u32 seqno;
	int ret, idx;

	if (from == NULL || to == from)
		return 0;

	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
		return i915_gem_object_wait_rendering(obj);

	idx = intel_ring_sync_index(from, to);

	seqno = obj->last_rendering_seqno;
	if (seqno <= from->sync_seqno[idx])
		return 0;

	ret = i915_gem_check_olr(obj->ring, seqno);
	if (ret)
		return ret;

	ret = to->sync_to(to, from, seqno);
	if (!ret)
		from->sync_seqno[idx] = seqno;

	return ret;
}

static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
	u32 old_write_domain, old_read_domains;

	/* Act as a barrier for all accesses through the GTT */
	mb();

	/* Force a pagefault for domain tracking on next user access */
	i915_gem_release_mmap(obj);

	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		return;

	old_read_domains = obj->base.read_domains;
	old_write_domain = obj->base.write_domain;

	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	int ret = 0;

	if (obj->gtt_space == NULL)
		return 0;

	if (obj->pin_count)
		return -EBUSY;

	ret = i915_gem_object_finish_gpu(obj);
	if (ret)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */

	i915_gem_object_finish_gtt(obj);

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it.
	 */
	if (ret == 0)
		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret == -ERESTARTSYS)
		return ret;
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		i915_gem_clflush_object(obj);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	/* release the fence reg _after_ flushing */
	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	trace_i915_gem_object_unbind(obj);

	if (obj->has_global_gtt_mapping)
		i915_gem_gtt_unbind_object(obj);
	if (obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
		obj->has_aliasing_ppgtt_mapping = 0;
	}
	i915_gem_gtt_finish_object(obj);

	i915_gem_object_put_pages_gtt(obj);

	list_del_init(&obj->gtt_list);
	list_del_init(&obj->mm_list);
	/* Avoid an unnecessary call to unbind on rebind. */
	obj->map_and_fenceable = true;

	drm_mm_put_block(obj->gtt_space);
	obj->gtt_space = NULL;
	obj->gtt_offset = 0;

	if (i915_gem_object_is_purgeable(obj))
		i915_gem_object_truncate(obj);

	return ret;
}

int
i915_gem_flush_ring(struct intel_ring_buffer *ring,
		    uint32_t invalidate_domains,
		    uint32_t flush_domains)
{
	int ret;

	if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);

	ret = ring->flush(ring, invalidate_domains, flush_domains);
	if (ret)
		return ret;

	if (flush_domains & I915_GEM_GPU_DOMAINS)
		i915_gem_process_flushing_list(ring, flush_domains);

	return 0;
}

static int i915_ring_idle(struct intel_ring_buffer *ring)
{
	int ret;

	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
		return 0;

	if (!list_empty(&ring->gpu_write_list)) {
		ret = i915_gem_flush_ring(ring,
				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;
	}

	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}

int i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	/* Flush everything onto the inactive list. */
	for_each_ring(ring, dev_priv, i) {
		ret = i915_ring_idle(ring);
		if (ret)
			return ret;

		/* Is the device fubar? */
		if (WARN_ON(!list_empty(&ring->gpu_write_list)))
			return -EBUSY;

		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
		if (ret)
			return ret;
	}

	return 0;
}

static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
					struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint64_t val;

	if (obj) {
		u32 size = obj->gtt_space->size;

		val = (uint64_t)((obj->gtt_offset + size - 4096) &
				 0xfffff000) << 32;
		val |= obj->gtt_offset & 0xfffff000;
		val |= (uint64_t)((obj->stride / 128) - 1) <<
			SANDYBRIDGE_FENCE_PITCH_SHIFT;

		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
		val |= I965_FENCE_REG_VALID;
	} else
		val = 0;

	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
	POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
}

static void i965_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint64_t val;

	if (obj) {
		u32 size = obj->gtt_space->size;

		val = (uint64_t)((obj->gtt_offset + size - 4096) &
				 0xfffff000) << 32;
		val |= obj->gtt_offset & 0xfffff000;
		val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
		val |= I965_FENCE_REG_VALID;
	} else
		val = 0;

	I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
	POSTING_READ(FENCE_REG_965_0 + reg * 8);
}

static void i915_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 val;

	if (obj) {
		u32 size = obj->gtt_space->size;
		int pitch_val;
		int tile_width;

		WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (obj->gtt_offset & (size - 1)),
		     "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
		     obj->gtt_offset, obj->map_and_fenceable, size);

		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
			tile_width = 128;
		else
			tile_width = 512;

		/* Note: pitch better be a power of two tile widths */
		pitch_val = obj->stride / tile_width;
		pitch_val = ffs(pitch_val) - 1;

		val = obj->gtt_offset;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I915_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;

	if (reg < 8)
		reg = FENCE_REG_830_0 + reg * 4;
	else
		reg = FENCE_REG_945_8 + (reg - 8) * 4;

	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

static void i830_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t val;

	if (obj) {
		u32 size = obj->gtt_space->size;
		uint32_t pitch_val;

		WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (obj->gtt_offset & (size - 1)),
		     "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
		     obj->gtt_offset, size);

		pitch_val = obj->stride / 128;
		pitch_val = ffs(pitch_val) - 1;

		val = obj->gtt_offset;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I830_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;

	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
	POSTING_READ(FENCE_REG_830_0 + reg * 4);
}

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
	case 5:
	case 4: i965_write_fence_reg(dev, reg, obj); break;
	case 3: i915_write_fence_reg(dev, reg, obj); break;
	case 2: i830_write_fence_reg(dev, reg, obj); break;
	default: break;
	}
}
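
/*
 * Editorial worked example for i915_write_fence_reg() above
 * (illustrative numbers): a 1MB X-tiled object at GTT offset 0x00100000
 * with a 2048 byte stride uses 512 byte wide tiles, so
 *
 *	pitch_val = ffs(2048 / 512) - 1 = 2
 *	val = 0x00100000			// start address
 *	    | I915_FENCE_SIZE_BITS(1MB)		// encoded region size
 *	    | (2 << I830_FENCE_PITCH_SHIFT)	// log2(pitch in tiles)
 *	    | I830_FENCE_REG_VALID
 *
 * i.e. one 32-bit register packs start, size, pitch and tiling, which
 * is why the WARNs above insist on power-of-two sizes and alignment.
 */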
static inline int fence_number(struct drm_i915_private *dev_priv,
			       struct drm_i915_fence_reg *fence)
{
	return fence - dev_priv->fence_regs;
}

static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int reg = fence_number(dev_priv, fence);

	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);

	if (enable) {
		obj->fence_reg = reg;
		fence->obj = obj;
		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
	} else {
		obj->fence_reg = I915_FENCE_REG_NONE;
		fence->obj = NULL;
		list_del_init(&fence->lru_list);
	}
}

static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->fenced_gpu_access) {
		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
			ret = i915_gem_flush_ring(obj->ring,
						  0, obj->base.write_domain);
			if (ret)
				return ret;
		}

		obj->fenced_gpu_access = false;
	}

	if (obj->last_fenced_seqno) {
		ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
		if (ret)
			return ret;

		obj->last_fenced_seqno = 0;
	}

	/* Ensure that all CPU reads are completed before installing a fence
	 * and all writes before removing the fence.
	 */
	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
		mb();

	return 0;
}

int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	ret = i915_gem_object_flush_fence(obj);
	if (ret)
		return ret;

	if (obj->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	i915_gem_object_update_fence(obj,
				     &dev_priv->fence_regs[obj->fence_reg],
				     false);
	i915_gem_object_fence_lost(obj);

	return 0;
}

static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg, *avail;
	int i;

	/* First try to find a free reg */
	avail = NULL;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return reg;

		if (!reg->pin_count)
			avail = reg;
	}

	if (avail == NULL)
		return NULL;

	/* None available, try to steal one or wait for a user to finish */
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
		if (reg->pin_count)
			continue;

		return reg;
	}

	return NULL;
}

/**
 * i915_gem_object_get_fence - set up fencing for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 */
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool enable = obj->tiling_mode != I915_TILING_NONE;
	struct drm_i915_fence_reg *reg;
	int ret;

	/* Have we updated the tiling parameters upon the object and so
	 * will need to serialise the write to the associated fence register?
	 */
	if (obj->fence_dirty) {
		ret = i915_gem_object_flush_fence(obj);
		if (ret)
			return ret;
	}

	/* Just update our place in the LRU if our fence is getting reused. */
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj->fence_reg];
		if (!obj->fence_dirty) {
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
			return 0;
		}
	} else if (enable) {
		reg = i915_find_fence_reg(dev);
		if (reg == NULL)
			return -EDEADLK;

		if (reg->obj) {
			struct drm_i915_gem_object *old = reg->obj;

			ret = i915_gem_object_flush_fence(old);
			if (ret)
				return ret;

			i915_gem_object_fence_lost(old);
		}
	} else
		return 0;

	i915_gem_object_update_fence(obj, reg, enable);
	obj->fence_dirty = false;

	return 0;
}
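
/*
 * Editorial usage sketch (illustrative): callers typically pin the
 * object into the mappable GTT before attaching a fence, so that tiled
 * CPU access through the aperture is detiled by the hardware:
 *
 *	ret = i915_gem_object_pin(obj, 0, true);
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj);
 *	...
 *	i915_gem_object_unpin(obj);
 *
 * An untiled object without a stale fence simply takes the final
 * "return 0" path above and keeps no fence.
 */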
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
			    unsigned alignment,
			    bool map_and_fenceable)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *free_space;
	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
	bool mappable, fenceable;
	int ret;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	fence_size = i915_gem_get_gtt_size(dev,
					   obj->base.size,
					   obj->tiling_mode);
	fence_alignment = i915_gem_get_gtt_alignment(dev,
						     obj->base.size,
						     obj->tiling_mode);
	unfenced_alignment =
		i915_gem_get_unfenced_gtt_alignment(dev,
						    obj->base.size,
						    obj->tiling_mode);

	if (alignment == 0)
		alignment = map_and_fenceable ? fence_alignment :
						unfenced_alignment;
	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	size = map_and_fenceable ? fence_size : obj->base.size;

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->base.size >
	    (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
		return -E2BIG;
	}

search_free:
	if (map_and_fenceable)
		free_space =
			drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
						    size, alignment,
						    0, dev_priv->mm.gtt_mappable_end,
						    0);
	else
		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
						size, alignment, 0);

	if (free_space != NULL) {
		if (map_and_fenceable)
			obj->gtt_space =
				drm_mm_get_block_range_generic(free_space,
							       size, alignment, 0,
							       0, dev_priv->mm.gtt_mappable_end,
							       0);
		else
			obj->gtt_space =
				drm_mm_get_block(free_space, size, alignment);
	}
	if (obj->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
		ret = i915_gem_evict_something(dev, size, alignment,
					       map_and_fenceable);
		if (ret)
			return ret;

		goto search_free;
	}

	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
	if (ret) {
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;

		if (ret == -ENOMEM) {
			/* first try to reclaim some memory by clearing the GTT */
			ret = i915_gem_evict_everything(dev, false);
			if (ret) {
				/* now try to shrink everyone else */
				if (gfpmask) {
					gfpmask = 0;
					goto search_free;
				}

				return -ENOMEM;
			}

			goto search_free;
		}

		return ret;
	}

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		i915_gem_object_put_pages_gtt(obj);
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;

		if (i915_gem_evict_everything(dev, false))
			return ret;

		goto search_free;
	}

	if (!dev_priv->mm.aliasing_ppgtt)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	obj->gtt_offset = obj->gtt_space->start;

	fenceable =
		obj->gtt_space->size == fence_size &&
		(obj->gtt_space->start & (fence_alignment - 1)) == 0;

	mappable =
		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;

	obj->map_and_fenceable = mappable && fenceable;

	trace_i915_gem_object_bind(obj, map_and_fenceable);
	return 0;
}
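
/*
 * Editorial summary of the binding loop above: the function works
 * through three fallbacks before giving up -
 *
 *	1. search for free GTT space; if nothing fits, evict something
 *	   and retry the search
 *	2. allocate backing pages; on -ENOMEM, evict everything and
 *	   retry, finally dropping the relaxed gfp mask
 *	3. set up the GTT entries; on failure, evict everything and
 *	   retry once more
 *
 * so transient memory pressure degrades into eviction rather than an
 * immediate failure.
 */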
void
i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (obj->cache_level != I915_CACHE_NONE)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	/* Queue the GPU write cache flushing we need. */
	return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	intel_gtt_chipset_flush();
	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj->gtt_space == NULL)
		return -EINVAL;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;

	if (obj->pending_gpu_write || write) {
		ret = i915_gem_object_wait_rendering(obj);
		if (ret)
			return ret;
	}

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	return 0;
}

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (obj->cache_level == cache_level)
		return 0;

	if (obj->pin_count) {
		DRM_DEBUG("can not change the cache level of pinned objects\n");
		return -EBUSY;
	}

	if (obj->gtt_space) {
		ret = i915_gem_object_finish_gpu(obj);
		if (ret)
			return ret;

		i915_gem_object_finish_gtt(obj);

		/* Before SandyBridge, you could not use tiling or fence
		 * registers with snooped memory, so relinquish any fences
		 * currently pointing to our region in the aperture.
		 */
		if (INTEL_INFO(obj->base.dev)->gen < 6) {
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		}

		if (obj->has_global_gtt_mapping)
			i915_gem_gtt_bind_object(obj, cache_level);
		if (obj->has_aliasing_ppgtt_mapping)
			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
					       obj, cache_level);
	}

	if (cache_level == I915_CACHE_NONE) {
		u32 old_read_domains, old_write_domain;

		/* If we're coming from LLC cached, then we haven't
		 * actually been tracking whether the data is in the
		 * CPU cache or not, since we only allow one bit set
		 * in obj->write_domain and have been skipping the clflushes.
		 * Just set it to the CPU cache for now.
		 */
		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
		WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);

		old_read_domains = obj->base.read_domains;
		old_write_domain = obj->base.write_domain;

		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;

		trace_i915_gem_object_change_domain(obj,
						    old_read_domains,
						    old_write_domain);
	}

	obj->cache_level = cache_level;
	return 0;
}
  2528. /*
  2529. * Prepare buffer for display plane (scanout, cursors, etc).
  2530. * Can be called from an uninterruptible phase (modesetting) and allows
  2531. * any flushes to be pipelined (for pageflips).
  2532. */
  2533. int
  2534. i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
  2535. u32 alignment,
  2536. struct intel_ring_buffer *pipelined)
  2537. {
  2538. u32 old_read_domains, old_write_domain;
  2539. int ret;
  2540. ret = i915_gem_object_flush_gpu_write_domain(obj);
  2541. if (ret)
  2542. return ret;
  2543. if (pipelined != obj->ring) {
  2544. ret = i915_gem_object_sync(obj, pipelined);
  2545. if (ret)
  2546. return ret;
  2547. }
  2548. /* The display engine is not coherent with the LLC cache on gen6. As
  2549. * a result, we make sure that the pinning that is about to occur is
  2550. * done with uncached PTEs. This is lowest common denominator for all
  2551. * chipsets.
  2552. *
  2553. * However for gen6+, we could do better by using the GFDT bit instead
  2554. * of uncaching, which would allow us to flush all the LLC-cached data
  2555. * with that bit in the PTE to main memory with just one PIPE_CONTROL.
  2556. */
  2557. ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
  2558. if (ret)
  2559. return ret;
  2560. /* As the user may map the buffer once pinned in the display plane
  2561. * (e.g. libkms for the bootup splash), we have to ensure that we
  2562. * always use map_and_fenceable for all scanout buffers.
  2563. */
  2564. ret = i915_gem_object_pin(obj, alignment, true);
  2565. if (ret)
  2566. return ret;
  2567. i915_gem_object_flush_cpu_write_domain(obj);
  2568. old_write_domain = obj->base.write_domain;
  2569. old_read_domains = obj->base.read_domains;
  2570. /* It should now be out of any other write domains, and we can update
  2571. * the domain values for our changes.
  2572. */
  2573. BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  2574. obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
  2575. trace_i915_gem_object_change_domain(obj,
  2576. old_read_domains,
  2577. old_write_domain);
  2578. return 0;
  2579. }
  2580. int
  2581. i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
  2582. {
  2583. int ret;
  2584. if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
  2585. return 0;
  2586. if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
  2587. ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
  2588. if (ret)
  2589. return ret;
  2590. }
  2591. ret = i915_gem_object_wait_rendering(obj);
  2592. if (ret)
  2593. return ret;
  2594. /* Ensure that we invalidate the GPU's caches and TLBs. */
  2595. obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
  2596. return 0;
  2597. }
  2598. /**
  2599. * Moves a single object to the CPU read, and possibly write domain.
  2600. *
  2601. * This function returns when the move is complete, including waiting on
  2602. * flushes to occur.
  2603. */
  2604. int
  2605. i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
  2606. {
  2607. uint32_t old_write_domain, old_read_domains;
  2608. int ret;
  2609. if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
  2610. return 0;
  2611. ret = i915_gem_object_flush_gpu_write_domain(obj);
  2612. if (ret)
  2613. return ret;
  2614. if (write || obj->pending_gpu_write) {
  2615. ret = i915_gem_object_wait_rendering(obj);
  2616. if (ret)
  2617. return ret;
  2618. }
  2619. i915_gem_object_flush_gtt_write_domain(obj);
  2620. old_write_domain = obj->base.write_domain;
  2621. old_read_domains = obj->base.read_domains;
  2622. /* Flush the CPU cache if it's still invalid. */
  2623. if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
  2624. i915_gem_clflush_object(obj);
  2625. obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
  2626. }
  2627. /* It should now be out of any other write domains, and we can update
  2628. * the domain values for our changes.
  2629. */
  2630. BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  2631. /* If we're writing through the CPU, then the GPU read domains will
  2632. * need to be invalidated at next use.
  2633. */
  2634. if (write) {
  2635. obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  2636. obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  2637. }
  2638. trace_i915_gem_object_change_domain(obj,
  2639. old_read_domains,
  2640. old_write_domain);
  2641. return 0;
  2642. }

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	u32 seqno = 0;
	int ret;

	if (atomic_read(&dev_priv->mm.wedged))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = __wait_seqno(ring, seqno, true, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}
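
/**
 * i915_gem_object_pin - pin an object into the GTT
 * @obj: object to pin
 * @alignment: required GTT alignment in bytes (0 accepts any offset)
 * @map_and_fenceable: the binding must be CPU-mappable and fenceable
 *
 * If the object is already bound at an offset that violates the
 * requested alignment or mappability, it is unbound and rebound; that
 * is only legal for unpinned objects, hence the WARN below.
 */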
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    uint32_t alignment,
		    bool map_and_fenceable)
{
	int ret;

	BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);

	if (obj->gtt_space != NULL) {
		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
		    (map_and_fenceable && !obj->map_and_fenceable)) {
			WARN(obj->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     obj->gtt_offset, alignment,
			     map_and_fenceable,
			     obj->map_and_fenceable);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment,
						  map_and_fenceable);
		if (ret)
			return ret;
	}

	if (!obj->has_global_gtt_mapping && map_and_fenceable)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	obj->pin_count++;
	obj->pin_mappable |= map_and_fenceable;

	return 0;
}

void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pin_count == 0);
	BUG_ON(obj->gtt_space == NULL);

	if (--obj->pin_count == 0)
		obj->pin_mappable = false;
}
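
/*
 * Kernel-internal users pair the two calls above around any access
 * that needs the object resident in the GTT. A minimal sketch (error
 * handling elided; the 4096-byte alignment is an assumption for
 * illustration, not a requirement of the API):
 *
 *	ret = i915_gem_object_pin(obj, 4096, true);
 *	if (ret == 0) {
 *		... access obj through the mappable aperture ...
 *		i915_gem_object_unpin(obj);
 *	}
 */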

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (obj->pin_filp != NULL && obj->pin_filp != file) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	obj->user_pin_count++;
	obj->pin_filp = file;
	if (obj->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment, true);
		if (ret)
			goto out;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj->gtt_offset;
out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_filp != file) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj->user_pin_count--;
	if (obj->user_pin_count == 0) {
		obj->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);

	args->busy = obj->active;

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_count) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer bound, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj) &&
	    obj->gtt_space == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
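
/**
 * i915_gem_alloc_object - allocate and initialise a GEM object
 *
 * Backs the object with shmem, restricts the page allocation mask on
 * chipsets that cannot relocate objects above 4GiB, and starts the
 * object out in the CPU domain, with an LLC cache level where the
 * hardware supports it.
 */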
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	u32 mask;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_info_add_obj(dev_priv, size);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	obj->base.driver_private = NULL;
	obj->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj->mm_list);
	INIT_LIST_HEAD(&obj->gtt_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->exec_list);
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;

	return obj;
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_gem_object_destroy(obj);

	if (gem_obj->import_attach)
		drm_prime_gem_destroy(gem_obj, obj->sg_table);

	if (obj->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	obj->pin_count = 0;
	if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		WARN_ON(i915_gem_object_unbind(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	if (obj->base.map_list.map)
		drm_gem_free_mmap_offset(&obj->base);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	kfree(obj);
}

int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	i915_gem_retire_requests(dev);

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_gem_evict_everything(dev, false);

	i915_gem_reset_fences(dev);

	/* Hack! Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer_sync(&dev_priv->hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}
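
/*
 * On Ivybridge the L3 cache can remap rows around defective cells.
 * The remapping registers only accept writes while DOP clock gating is
 * disabled, hence the MISCCPCTL save/clear/restore dance below.
 */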
void i915_gem_l3_remap(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 misccpctl;
	int i;

	if (!IS_IVYBRIDGE(dev))
		return;

	if (!dev_priv->mm.l3_remap_info)
		return;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
		if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
			DRM_DEBUG("0x%x was already programmed to %x\n",
				  GEN7_L3LOG_BASE + i, remap);
		if (remap && !dev_priv->mm.l3_remap_info[i/4])
			DRM_DEBUG_DRIVER("Clearing remapped register\n");
		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
	}

	/* Make sure all the writes land before restoring DOP clock gating */
	POSTING_READ(GEN7_L3LOG_BASE);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
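
/*
 * Enable memory swizzling for tiled surfaces on gen5+. Gen6 and gen7
 * additionally need TILECTL and a per-generation ARB_MODE bit; on
 * gen4 and earlier there is nothing to program here.
 */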
void i915_gem_init_swizzling(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
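
/*
 * Program the aliasing PPGTT: write one page-directory entry per page
 * table into the stretch of the global GTT reserved for it, then point
 * every ring at the directory. PP_DIR_BASE takes the directory offset
 * in cachelines packed into bits 31:16, which explains the divide and
 * shift below.
 */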
void i915_gem_init_ppgtt(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	uint32_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	if (!dev_priv->mm.aliasing_ppgtt)
		return;

	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		if (dev_priv->mm.gtt->needs_dmar)
			pt_addr = ppgtt->pt_dma_addr[i];
		else
			pt_addr = page_to_phys(ppgtt->pt_pages[i]);

		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
}

static bool
intel_enable_blt(struct drm_device *dev)
{
	if (!HAS_BLT(dev))
		return false;

	/* The blitter was dysfunctional on early prototypes */
	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
		DRM_INFO("BLT not supported on this pre-production hardware;"
			 " graphics performance will be degraded.\n");
		return false;
	}

	return true;
}
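
/*
 * Bring up the GPU rings in order (render, then BSD and blitter where
 * present), unwinding any already-initialised rings on failure, then
 * set up contexts and the aliasing PPGTT.
 */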
int
i915_gem_init_hw(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!intel_enable_gtt())
		return -EIO;

	i915_gem_l3_remap(dev);

	i915_gem_init_swizzling(dev);

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (intel_enable_blt(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	dev_priv->next_seqno = 1;

	/*
	 * XXX: There was some w/a described somewhere suggesting loading
	 * contexts before PPGTT.
	 */
	i915_gem_context_init(dev);
	i915_gem_init_ppgtt(dev);

	return 0;

cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
	return ret;
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;
	int ret;

	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	mutex_lock(&dev->struct_mutex);
	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
		 * aperture accordingly when using aliasing ppgtt. */
		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;

		i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	} else {
		/* Let GEM Manage all of the aperture.
		 *
		 * However, leave one page at the end still bound to the scratch
		 * page. There are a number of places where the hardware
		 * apparently prefetches past the end of the object, and we've
		 * seen multiple hangs with the GPU head pointer stuck in a
		 * batchbuffer bound at the last page of the aperture. One page
		 * should be enough to keep any prefetching inside of the
		 * aperture.
		 */
		i915_gem_init_global_gtt(dev, 0, mappable_size,
					 gtt_size);
	}

	ret = i915_gem_init_hw(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		i915_gem_cleanup_aliasing_ppgtt(dev);
		return ret;
	}

	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->dri1.allow_batchbuffer = 1;
	return 0;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		intel_cleanup_ring_buffer(ring);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
}
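
/*
 * One-time GEM setup at driver load: initialise the bookkeeping lists,
 * fence registers, retire work handler and the inactive-list shrinker.
 */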
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_completion(&dev_priv->error_completion);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	i915_gem_reset_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object,
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
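
/*
 * Copy the contents of a physically contiguous object back into its
 * shmem backing pages and drop the phys binding.
 */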
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	intel_gtt_chipset_flush();

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}
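
/*
 * Bind an object to a physically contiguous backing store, creating
 * the phys object on first use and copying the current shmem contents
 * into it.
 */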
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	intel_gtt_chipset_flush();
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->mm.active_list);

	return !lists_empty;
}
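
/*
 * Shrinker callback. Per the shrinker contract, a scan count of zero
 * is a query for the number of reclaimable objects; otherwise we
 * unbind purgeable buffers first, then anything else on the inactive
 * list, and as a last resort idle the GPU and rescan.
 */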
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj, *next;
	int nr_to_scan = sc->nr_to_scan;
	int cnt;

	if (!mutex_trylock(&dev->struct_mutex))
		return 0;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		cnt = 0;
		list_for_each_entry(obj,
				    &dev_priv->mm.inactive_list,
				    mm_list)
			cnt++;
		mutex_unlock(&dev->struct_mutex);
		return cnt / 100 * sysctl_vfs_cache_pressure;
	}

rescan:
	/* first scan for clean buffers */
	i915_gem_retire_requests(dev);

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (i915_gem_object_is_purgeable(obj)) {
			if (i915_gem_object_unbind(obj) == 0 &&
			    --nr_to_scan == 0)
				break;
		}
	}

	/* second pass, evict/count anything still on the inactive list */
	cnt = 0;
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (nr_to_scan &&
		    i915_gem_object_unbind(obj) == 0)
			nr_to_scan--;
		else
			cnt++;
	}

	if (nr_to_scan && i915_gpu_is_active(dev)) {
		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This has a dramatic impact in reducing the number of
		 * OOM-killer events whilst running the GPU aggressively.
		 */
		if (i915_gpu_idle(dev) == 0)
			goto rescan;
	}

	mutex_unlock(&dev->struct_mutex);
	return cnt / 100 * sysctl_vfs_cache_pressure;
}