i915_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/intel-gtt.h>
static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);

static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
						  bool pipelined);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
					   bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);

static int
i915_gem_object_get_pages(struct drm_gem_object *obj,
			  gfp_t gfpmask);

static void
i915_gem_object_put_pages(struct drm_gem_object *obj);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.gtt_count++;
	dev_priv->mm.gtt_memory += size;
}

static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.gtt_count--;
	dev_priv->mm.gtt_memory -= size;
}

static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.pin_count++;
	dev_priv->mm.pin_memory += size;
}

static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.pin_count--;
	dev_priv->mm.pin_memory -= size;
}
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	/* Success, we reset the GPU! */
	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/* GPU is hung, bump the completion count to account for
	 * the token we just consumed so that we never hit zero and
	 * end up waiting upon a subsequent completion event that
	 * will never happen.
	 */
	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return -EIO;
}

static int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		mutex_unlock(&dev->struct_mutex);
		return -EAGAIN;
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->gtt_space &&
		!obj_priv->active &&
		obj_priv->pin_count == 0;
}
int i915_gem_do_init(struct drm_device *dev,
		     unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev_priv->mm.gtt_total = end - start;

	return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
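
/*
 * Illustrative sketch only, not part of this file: querying the aperture
 * from userspace via the uapi in i915_drm.h. Here "fd" is assumed to be an
 * open DRM device file descriptor; aper_size and aper_available_size report
 * the totals maintained by the bookkeeping helpers above.
 *
 *	struct drm_i915_gem_get_aperture aper = { 0 };
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aper) == 0)
 *		printf("%llu of %llu bytes available\n",
 *		       (unsigned long long)aper.aper_available_size,
 *		       (unsigned long long)aper.aper_size);
 */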
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		i915_gem_info_remove_obj(dev->dev_private, obj->size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(obj);
	trace_i915_gem_object_create(obj);

	args->handle = handle;
	return 0;
}
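
/*
 * Illustrative sketch only, not part of this file: how userspace might
 * invoke the create ioctl above through the uapi in i915_drm.h. Here "fd"
 * is assumed to be an open DRM device file descriptor and error handling
 * is elided; on success, create.handle names the new object. The size is
 * rounded up to a whole page by the kernel, as above.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 */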
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char *vaddr;
	int ret;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr);

	return ret;
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}

static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}
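
/*
 * Worked example for the swizzle loop above: within a page whose physical
 * address has bit 17 set, bytes are stored with address bit 6 flipped, so
 * gpu_offset 0x00 actually lives at vaddr 0x40, 0x40 at 0x00, 0x80 at 0xc0,
 * and so on. The copy is therefore split at 64-byte cacheline boundaries so
 * that each memcpy stays within a single swizzled cacheline.
 */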
/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within the object
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		if (fast_shmem_read(obj_priv->pages,
				    page_base, page_offset,
				    user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;

		ret = i915_gem_evict_something(dev, obj->size,
					       i915_gem_get_gtt_alignment(obj));
		if (ret)
			return ret;

		ret = i915_gem_object_get_pages(obj, 0);
	}

	return ret;
}
/**
 * This is the fallback shmem pread path, which uses get_user_pages to pin
 * the user pages so we can copy out of the object's backing pages while
 * holding the struct_mutex and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	/* Bounds check source. */
	if (args->offset > obj->size || args->size > obj->size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	if (args->size == 0)
		goto out;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size)) {
		ret = -EFAULT;
		goto out;
	}

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto out;

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out_put;

	ret = -EFAULT;
	if (!i915_gem_object_needs_bit17_swizzle(obj))
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);

out_put:
	i915_gem_object_put_pages(obj);
out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
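
/*
 * Illustrative sketch only, not part of this file: reading back an object
 * from userspace with the pread ioctl above. "fd" and "handle" are assumed
 * to be an open DRM fd and a valid GEM handle; error handling is elided.
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */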
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char *vaddr;
	int ret;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
	ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr);

	return ret;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}
/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_pages;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within the object
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		if (fast_shmem_write(obj_priv->pages,
				     page_base, page_offset,
				     user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	/* Bounds check destination. */
	if (args->offset > obj->size || args->size > obj->size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	if (args->size == 0)
		goto out;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size)) {
		ret = -EFAULT;
		goto out;
	}

	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret) {
		ret = -EFAULT;
		goto out;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 obj_priv->gtt_space &&
		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_object_pin(obj, 0);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret)
			goto out_unpin;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
		i915_gem_object_unpin(obj);
	} else {
		ret = i915_gem_object_get_pages_or_evict(obj);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			goto out_put;

		ret = -EFAULT;
		if (!i915_gem_object_needs_bit17_swizzle(obj))
			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);

out_put:
		i915_gem_object_put_pages(obj);
	}

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
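
/*
 * Illustrative sketch only, not part of this file: the pwrite counterpart
 * of the pread example earlier. As the comment above notes, untiled
 * GTT-bound objects take the fast GTT path; everything else falls back to
 * the shmem paths. "fd", "handle" and "buf" are assumed as before.
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */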
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	intel_mark_busy(dev, obj);

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg =
				&dev_priv->fence_regs[obj_priv->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	/* Maintain LRU order of "inactive" objects */
	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (to_intel_bo(obj)->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
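
/*
 * Illustrative sketch only, not part of this file: how userspace typically
 * pairs the two ioctls above around CPU access. This pairing is an assumed
 * usage pattern rather than something mandated here; "fd" and "handle" are
 * assumed as before.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *	... CPU reads/writes through a CPU mapping of the object ...
 *	struct drm_i915_gem_sw_finish fin = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &fin);
 */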
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
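
/*
 * Illustrative sketch only, not part of this file: obtaining a CPU mapping
 * of an object's shmem backing store via the ioctl above (the GTT mapping
 * path instead goes through the fake offset machinery set up by
 * i915_gem_create_mmap_offset() below). "fd", "handle" and "obj_size" are
 * assumed.
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = obj_size,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0)
 *		ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 */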

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, true);
		if (ret)
			goto unlock;
	}

	if (i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
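
/*
 * Note on the error mapping above (descriptive only): -ERESTARTSYS is
 * folded into VM_FAULT_NOPAGE so that a signal which interrupts the bind
 * or wait simply causes the fault to be retried after the signal is
 * handled, instead of killing the process with SIGBUS.
 */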

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure.  Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked.  Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}

static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (INTEL_INFO(dev)->gen == 3)
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
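
/*
 * Worked example (illustrative numbers): a 700KiB tiled object on gen2
 * starts the loop at 512KiB; since 512KiB < 700KiB, i doubles to 1MiB,
 * which is >= the object size, so 1MiB is returned.  The loop simply
 * rounds the object size up to the next power-of-two fence size,
 * starting from the platform minimum.
 */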

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	args->offset = obj_priv->mmap_offset;

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto out;
	}

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
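
/*
 * Illustrative sketch (not part of the driver): userspace consumes the
 * fake offset by handing it straight to mmap(2) on the DRM fd.  The ioctl
 * and struct names match the i915 uAPI; the call sequence is an assumed
 * example.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, arg.offset);
 *
 * The first access through ptr then lands in i915_gem_fault() above.
 */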

static void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count = obj->size / PAGE_SIZE;
	int i;

	BUG_ON(obj_priv->pages_refcount == 0);
	BUG_ON(obj_priv->madv == __I915_MADV_PURGED);

	if (--obj_priv->pages_refcount != 0)
		return;

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj_priv->madv == I915_MADV_DONTNEED)
		obj_priv->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj_priv->dirty)
			set_page_dirty(obj_priv->pages[i]);

		if (obj_priv->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj_priv->pages[i]);

		page_cache_release(obj_priv->pages[i]);
	}
	obj_priv->dirty = 0;

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
}

static uint32_t
i915_gem_next_request_seqno(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	ring->outstanding_lazy_request = true;
	return dev_priv->next_seqno;
}
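
/*
 * Descriptive note (no new behaviour): this only reserves the next seqno.
 * Setting outstanding_lazy_request means that a later wait on this seqno
 * (see i915_do_wait_request() below) will emit the actual request via
 * i915_add_request() if nothing else has done so first.
 */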

static void
i915_gem_object_move_to_active(struct drm_gem_object *obj,
			       struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);

	BUG_ON(ring == NULL);
	obj_priv->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}

	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
	list_move_tail(&obj_priv->ring_list, &ring->active_list);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
	list_del_init(&obj_priv->ring_list);
	obj_priv->last_rendering_seqno = 0;
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct inode *inode;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.  Here we mirror the actions taken
	 * by shmem_delete_inode() to release the backing store.
	 */
	inode = obj->filp->f_path.dentry->d_inode;
	truncate_inode_pages(inode->i_mapping, 0);
	if (inode->i_op->truncate_range)
		inode->i_op->truncate_range(inode, 0, (loff_t)-1);

	obj_priv->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->madv == I915_MADV_DONTNEED;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (obj_priv->pin_count != 0)
		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
	else
		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
	list_del_init(&obj_priv->ring_list);

	BUG_ON(!list_empty(&obj_priv->gpu_write_list));

	obj_priv->last_rendering_seqno = 0;
	obj_priv->ring = NULL;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	WARN_ON(i915_verify_lists(dev));
}

static void
i915_gem_process_flushing_list(struct drm_device *dev,
			       uint32_t flush_domains,
			       struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv, *next;

	list_for_each_entry_safe(obj_priv, next,
				 &ring->gpu_write_list,
				 gpu_write_list) {
		struct drm_gem_object *obj = &obj_priv->base;

		if (obj->write_domain & flush_domains) {
			uint32_t old_write_domain = obj->write_domain;

			obj->write_domain = 0;
			list_del_init(&obj_priv->gpu_write_list);
			i915_gem_object_move_to_active(obj, ring);

			/* update the fence lru list */
			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
				struct drm_i915_fence_reg *reg =
					&dev_priv->fence_regs[obj_priv->fence_reg];
				list_move_tail(&reg->lru_list,
					       &dev_priv->mm.fence_list);
			}

			trace_i915_gem_object_change_domain(obj,
							    obj->read_domains,
							    old_write_domain);
		}
	}
}

uint32_t
i915_add_request(struct drm_device *dev,
		 struct drm_file *file,
		 struct drm_i915_gem_request *request,
		 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = NULL;
	uint32_t seqno;
	int was_empty;

	if (file != NULL)
		file_priv = file->driver_priv;

	if (request == NULL) {
		request = kzalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return 0;
	}

	seqno = ring->add_request(dev, ring, 0);
	ring->outstanding_lazy_request = false;

	request->seqno = seqno;
	request->ring = ring;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);

	if (file_priv) {
		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	if (!dev_priv->mm.suspended) {
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
		if (was_empty)
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work, HZ);
	}
	return seqno;
}
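
/*
 * Illustrative lifecycle of a request (a sketch of the typical flow, not
 * a uAPI contract): execbuffer submits GPU commands, then calls
 * i915_add_request() to emit a seqno into the ring.  The request joins
 * ring->request_list (and the file's client list, used for throttling).
 * Retirement later compares the ring's current seqno against
 * request->seqno to free the request and move its buffers off the
 * active list.
 */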

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static void
i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
{
	uint32_t flush_domains = 0;

	/* The sampler always gets flushed on i965 (sigh) */
	if (INTEL_INFO(dev)->gen >= 4)
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;

	ring->flush(dev, ring,
		    I915_GEM_DOMAIN_COMMAND, flush_domains);
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				      struct intel_ring_buffer *ring)
{
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&ring->active_list,
					    struct drm_i915_gem_object,
					    ring_list);

		obj_priv->base.write_domain = 0;
		list_del_init(&obj_priv->gpu_write_list);
		i915_gem_object_move_to_inactive(&obj_priv->base);
	}
}

void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int i;

	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
	i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
	i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);

	/* Remove anything from the flushing lists. The GPU cache is likely
	 * to be lost on reset along with the data, so simply move the
	 * lost bo to the inactive list.
	 */
	while (!list_empty(&dev_priv->mm.flushing_list)) {
		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    mm_list);

		obj_priv->base.write_domain = 0;
		list_del_init(&obj_priv->gpu_write_list);
		i915_gem_object_move_to_inactive(&obj_priv->base);
	}

	/* Move everything out of the GPU domains to ensure we do any
	 * necessary invalidation upon reuse.
	 */
	list_for_each_entry(obj_priv,
			    &dev_priv->mm.inactive_list,
			    mm_list)
	{
		obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	}

	/* The fence registers are invalidated so clear them out */
	for (i = 0; i < 16; i++) {
		struct drm_i915_fence_reg *reg;

		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			continue;

		i915_gem_clear_fence_reg(reg->obj);
	}
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
static void
i915_gem_retire_requests_ring(struct drm_device *dev,
			      struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!ring->status_page.page_addr ||
	    list_empty(&ring->request_list))
		return;

	WARN_ON(i915_verify_lists(dev));

	seqno = ring->get_seqno(dev, ring);
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_seqno_passed(seqno, request->seqno))
			break;

		trace_i915_gem_request_retire(dev, request->seqno);

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&ring->active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&ring->active_list,
					    struct drm_i915_gem_object,
					    ring_list);

		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
			break;

		obj = &obj_priv->base;
		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else
			i915_gem_object_move_to_inactive(obj);
	}

	if (unlikely(dev_priv->trace_irq_seqno &&
		     i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
		ring->user_irq_put(dev, ring);
		dev_priv->trace_irq_seqno = 0;
	}

	WARN_ON(i915_verify_lists(dev));
}

void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
		struct drm_i915_gem_object *obj_priv, *tmp;

		/* We must be careful that during unbind() we do not
		 * accidentally infinitely recurse into retire requests.
		 * Currently:
		 *   retire -> free -> unbind -> wait -> retire_ring
		 */
		list_for_each_entry_safe(obj_priv, tmp,
					 &dev_priv->mm.deferred_free_list,
					 mm_list)
			i915_gem_free_object_tail(&obj_priv->base);
	}

	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
	i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
	i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	/* Come back later if the device is busy... */
	if (!mutex_trylock(&dev->struct_mutex)) {
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
		return;
	}

	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.suspended &&
	    (!list_empty(&dev_priv->render_ring.request_list) ||
	     !list_empty(&dev_priv->bsd_ring.request_list) ||
	     !list_empty(&dev_priv->blt_ring.request_list)))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}

int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
		     bool interruptible, struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (atomic_read(&dev_priv->mm.wedged))
		return -EAGAIN;

	if (ring->outstanding_lazy_request) {
		seqno = i915_add_request(dev, NULL, NULL, ring);
		if (seqno == 0)
			return -ENOMEM;
	}
	BUG_ON(seqno == dev_priv->next_seqno);

	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
		if (HAS_PCH_SPLIT(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

		trace_i915_gem_request_wait_begin(dev, seqno);

		ring->waiting_gem_seqno = seqno;
		ring->user_irq_get(dev, ring);
		if (interruptible)
			ret = wait_event_interruptible(ring->irq_queue,
				i915_seqno_passed(
					ring->get_seqno(dev, ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));
		else
			wait_event(ring->irq_queue,
				i915_seqno_passed(
					ring->get_seqno(dev, ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));

		ring->user_irq_put(dev, ring);
		ring->waiting_gem_seqno = 0;

		trace_i915_gem_request_wait_end(dev, seqno);
	}
	if (atomic_read(&dev_priv->mm.wedged))
		ret = -EAGAIN;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
			  __func__, ret, seqno, ring->get_seqno(dev, ring),
			  dev_priv->next_seqno);

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests_ring(dev, ring);

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno,
		  struct intel_ring_buffer *ring)
{
	return i915_do_wait_request(dev, seqno, 1, ring);
}

static void
i915_gem_flush_ring(struct drm_device *dev,
		    struct drm_file *file_priv,
		    struct intel_ring_buffer *ring,
		    uint32_t invalidate_domains,
		    uint32_t flush_domains)
{
	ring->flush(dev, ring, invalidate_domains, flush_domains);
	i915_gem_process_flushing_list(dev, flush_domains, ring);
}

static void
i915_gem_flush(struct drm_device *dev,
	       struct drm_file *file_priv,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains,
	       uint32_t flush_rings)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
		if (flush_rings & RING_RENDER)
			i915_gem_flush_ring(dev, file_priv,
					    &dev_priv->render_ring,
					    invalidate_domains, flush_domains);
		if (flush_rings & RING_BSD)
			i915_gem_flush_ring(dev, file_priv,
					    &dev_priv->bsd_ring,
					    invalidate_domains, flush_domains);
		if (flush_rings & RING_BLT)
			i915_gem_flush_ring(dev, file_priv,
					    &dev_priv->blt_ring,
					    invalidate_domains, flush_domains);
	}
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj,
			       bool interruptible)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
		ret = i915_do_wait_request(dev,
					   obj_priv->last_rendering_seqno,
					   interruptible,
					   obj_priv->ring);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret = 0;

	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* blow away mappings if mapped through GTT */
	i915_gem_release_mmap(obj);

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it.  This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret == -ERESTARTSYS)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */
	if (ret) {
		i915_gem_clflush_object(obj);
		obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	/* release the fence reg _after_ flushing */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	drm_unbind_agp(obj_priv->agp_mem);
	drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);

	i915_gem_object_put_pages(obj);
	BUG_ON(obj_priv->pages_refcount);

	i915_gem_info_remove_gtt(dev_priv, obj->size);
	list_del_init(&obj_priv->mm_list);

	drm_mm_put_block(obj_priv->gtt_space);
	obj_priv->gtt_space = NULL;
	obj_priv->gtt_offset = 0;

	if (i915_gem_object_is_purgeable(obj_priv))
		i915_gem_object_truncate(obj);

	trace_i915_gem_object_unbind(obj);

	return ret;
}

static int i915_ring_idle(struct drm_device *dev,
			  struct intel_ring_buffer *ring)
{
	if (list_empty(&ring->gpu_write_list))
		return 0;

	i915_gem_flush_ring(dev, NULL, ring,
			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	return i915_wait_request(dev,
				 i915_gem_next_request_seqno(dev, ring),
				 ring);
}

int
i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool lists_empty;
	int ret;

	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       list_empty(&dev_priv->bsd_ring.active_list) &&
		       list_empty(&dev_priv->blt_ring.active_list));
	if (lists_empty)
		return 0;

	/* Flush everything onto the inactive list. */
	ret = i915_ring_idle(dev, &dev_priv->render_ring);
	if (ret)
		return ret;

	ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
	if (ret)
		return ret;

	ret = i915_ring_idle(dev, &dev_priv->blt_ring);
	if (ret)
		return ret;

	return 0;
}

static int
i915_gem_object_get_pages(struct drm_gem_object *obj,
			  gfp_t gfpmask)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;

	BUG_ON(obj_priv->pages_refcount
	       == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);

	if (obj_priv->pages_refcount++ != 0)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->pages != NULL);
	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
	if (obj_priv->pages == NULL) {
		obj_priv->pages_refcount--;
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER |
					   __GFP_COLD |
					   __GFP_RECLAIMABLE |
					   gfpmask);
		if (IS_ERR(page))
			goto err_pages;

		obj_priv->pages[i] = page;
	}

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	while (i--)
		page_cache_release(obj_priv->pages[i]);

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
	obj_priv->pages_refcount--;
	return PTR_ERR(page);
}

static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
		SANDYBRIDGE_FENCE_PITCH_SHIFT;

	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
}

static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}

static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t fence_reg, val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
		     __func__, obj_priv->gtt_offset, obj->size);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
	else
		WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (regnum < 8)
		fence_reg = FENCE_REG_830_0 + (regnum * 4);
	else
		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
	I915_WRITE(fence_reg, val);
}
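
/*
 * Worked example (illustrative numbers): an X-tiled object with a
 * 2048-byte stride on a chip with 512-byte tile widths gives
 * pitch_val = 2048 / 512 = 4 tile widths, encoded in the fence register
 * as ffs(4) - 1 = 2, i.e. log2 of the pitch in tiles.
 */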

static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;
	uint32_t fence_size_bits;

	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		return;
	}

	pitch_val = obj_priv->stride / 128;
	pitch_val = ffs(pitch_val) - 1;
	WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
	WARN_ON(fence_size_bits & ~0x00000f00);
	val |= fence_size_bits;
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}

static int i915_find_fence_reg(struct drm_device *dev,
			       bool interruptible)
{
	struct drm_i915_fence_reg *reg = NULL;
	struct drm_i915_gem_object *obj_priv = NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int i, avail, ret;

	/* First try to find a free reg */
	avail = 0;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return i;

		obj_priv = to_intel_bo(reg->obj);
		if (!obj_priv->pin_count)
			avail++;
	}

	if (avail == 0)
		return -ENOSPC;

	/* None available, try to steal one or wait for a user to finish */
	i = I915_FENCE_REG_NONE;
	list_for_each_entry(reg, &dev_priv->mm.fence_list,
			    lru_list) {
		obj = reg->obj;
		obj_priv = to_intel_bo(obj);

		if (obj_priv->pin_count)
			continue;

		/* found one! */
		i = obj_priv->fence_reg;
		break;
	}

	BUG_ON(i == I915_FENCE_REG_NONE);

	/* We only have a reference on obj from the active list. put_fence_reg
	 * might drop that one, causing a use-after-free in it. So hold a
	 * private reference to obj like the other callers of put_fence_reg
	 * (set_tiling ioctl) do. */
	drm_gem_object_reference(obj);
	ret = i915_gem_object_put_fence_reg(obj, interruptible);
	drm_gem_object_unreference(obj);
	if (ret != 0)
		return ret;

	return i;
}

/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
			      bool interruptible)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg = NULL;
	int ret;

	/* Just update our place in the LRU if our fence is getting used. */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj_priv->fence_reg];
		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
		return 0;
	}

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	ret = i915_find_fence_reg(dev, interruptible);
	if (ret < 0)
		return ret;

	obj_priv->fence_reg = ret;
	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
	list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);

	reg->obj = obj;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		sandybridge_write_fence_reg(reg);
		break;
	case 5:
	case 4:
		i965_write_fence_reg(reg);
		break;
	case 3:
		i915_write_fence_reg(reg);
		break;
	case 2:
		i830_write_fence_reg(reg);
		break;
	}

	trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
					obj_priv->tiling_mode);

	return 0;
}

/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg =
		&dev_priv->fence_regs[obj_priv->fence_reg];
	uint32_t fence_reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
			     (obj_priv->fence_reg * 8), 0);
		break;
	case 5:
	case 4:
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
		break;
	case 3:
		if (obj_priv->fence_reg >= 8)
			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
		else
	case 2:
			/* Deliberate construct: the "case 2" label sits
			 * inside the gen3 else branch so that gen2 shares
			 * the low-register path below.
			 */
			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;

		I915_WRITE(fence_reg, 0);
		break;
	}

	reg->obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	list_del_init(&reg->lru_list);
}

/**
 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
 * to the buffer to finish, and then resets the fence register.
 * @obj: tiled object holding a fence register.
 * @interruptible: whether the wait upon the fence is interruptible
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
			      bool interruptible)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg;

	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	/* If we've changed tiling, GTT-mappings of the object
	 * need to re-fault to ensure that the correct fence register
	 * setup is in place.
	 */
	i915_gem_release_mmap(obj);

	/* On the i915, GPU access to tiled buffers is via a fence,
	 * therefore we must wait for any outstanding access to complete
	 * before clearing the fence.
	 */
	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
	if (reg->gpu) {
		int ret;

		ret = i915_gem_object_flush_gpu_write_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_wait_rendering(obj, interruptible);
		if (ret)
			return ret;

		reg->gpu = false;
	}

	i915_gem_object_flush_gtt_write_domain(obj);
	i915_gem_clear_fence_reg(obj);

	return 0;
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_mm_node *free_space;
	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
	int ret;

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->size > dev_priv->mm.gtt_total) {
		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
		return -E2BIG;
	}

search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL)
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
		ret = i915_gem_evict_something(dev, obj->size, alignment);
		if (ret)
			return ret;

		goto search_free;
	}

	ret = i915_gem_object_get_pages(obj, gfpmask);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		if (ret == -ENOMEM) {
			/* first try to clear up some space from the GTT */
			ret = i915_gem_evict_something(dev, obj->size,
						       alignment);
			if (ret) {
				/* now try to shrink everyone else */
				if (gfpmask) {
					gfpmask = 0;
					goto search_free;
				}

				return ret;
			}

			goto search_free;
		}

		return ret;
	}

	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->pages,
					       obj->size >> PAGE_SHIFT,
					       obj_priv->gtt_space->start,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_put_pages(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		ret = i915_gem_evict_something(dev, obj->size, alignment);
		if (ret)
			return ret;

		goto search_free;
	}

	/* keep track of the bound object by adding it to the inactive list */
	list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
	i915_gem_info_add_gtt(dev_priv, obj->size);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	obj_priv->gtt_offset = obj_priv->gtt_space->start;
	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);

	return 0;
}
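
/*
 * Descriptive summary of the retry ladder above (no new behaviour): if
 * the GTT search fails, evict something and retry; if page allocation
 * fails with -ENOMEM, evict and retry, and as a last resort drop the
 * __GFP_NORETRY | __GFP_NOWARN mask so the allocator can try harder; if
 * the AGP bind fails, evict and retry.  Every path loops back to
 * search_free.
 */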

void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->pages == NULL)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static int
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
				       bool pipelined)
{
	struct drm_device *dev = obj->dev;
	uint32_t old_write_domain;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	/* Queue the GPU write cache flushing we need. */
	old_write_domain = obj->write_domain;
	i915_gem_flush_ring(dev, NULL,
			    to_intel_bo(obj)->ring,
			    0, obj->write_domain);
	BUG_ON(obj->write_domain);

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);

	if (pipelined)
		return 0;

	return i915_gem_object_wait_rendering(obj, true);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	if (write) {
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->read_domains = I915_GEM_DOMAIN_GTT;
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/*
 * Prepare buffer for display plane.  Use an uninterruptible wait for any
 * required flush, as the modesetting process is not supposed to be
 * interrupted.
 */
int
i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
				     bool pipelined)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
	if (ret)
		return ret;

	/* Currently, we are always called from a non-interruptible context. */
	if (!pipelined) {
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}

	i915_gem_object_flush_cpu_write_domain(obj);

	old_read_domains = obj->read_domains;
	obj->read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);

	return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	if (write) {
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains = I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/*
 * Set the next domain for the specified object.  This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations.
 *
 * This is (we hope) the only really tricky part of gem.  The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent.  A few concrete examples may
 * help to explain how it works.  For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
				  struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;
	uint32_t old_read_domains;

	intel_mark_busy(dev, obj);

	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |=
			obj->pending_read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
		i915_gem_clflush_object(obj);

	old_read_domains = obj->read_domains;

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains). So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->pending_write_domain == 0)
		obj->pending_write_domain = obj->write_domain;
	obj->read_domains = obj->pending_read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
	if (flush_domains & I915_GEM_GPU_DOMAINS)
		dev_priv->mm.flush_rings |= obj_priv->ring->id;
	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
		dev_priv->mm.flush_rings |= ring->id;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);
}
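
/*
 * Illustrative walk-through (editor's sketch, not driver code): for the
 * constant buffer in Case 3 above, step 5 arrives with the object in
 * (CPU, CPU) and pending domains (RENDER, 0).  The function computes
 *
 *	flush_domains      = write_domain                 = CPU
 *	invalidate_domains = pending_read & ~write_domain = RENDER
 *
 * so the CPU cache is clflushed here, while the accumulated RENDER
 * invalidate is emitted later by i915_gem_flush() in execbuffer.
 */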
/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	kfree(obj_priv->page_cpu_valid);
	obj_priv->page_cpu_valid = NULL;
}
/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object records which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t old_read_domains;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
						   GFP_KERNEL);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->pages + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	old_read_domains = obj->read_domains;
	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);

	return 0;
}
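
/*
 * Example (editor's sketch): a pread of 0x1000 bytes at offset 0x2000 from
 * a 16 KiB object clflushes only page 2 and marks page_cpu_valid[2], so a
 * later access to page 0 costs exactly one more per-page flush rather than
 * a whole-object move into the CPU domain.
 */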
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
			     struct drm_file *file_priv,
			     struct drm_i915_gem_exec_object2 *entry)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_gem_object *target_obj = NULL;
	uint32_t target_handle = 0;
	int i, ret = 0;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry reloc;
		uint32_t target_offset;

		if (__copy_from_user_inatomic(&reloc,
					      user_relocs+i,
					      sizeof(reloc))) {
			ret = -EFAULT;
			break;
		}

		if (reloc.target_handle != target_handle) {
			drm_gem_object_unreference(target_obj);

			target_obj = drm_gem_object_lookup(dev, file_priv,
							   reloc.target_handle);
			if (target_obj == NULL) {
				ret = -ENOENT;
				break;
			}

			target_handle = reloc.target_handle;
		}
		target_offset = to_intel_bo(target_obj)->gtt_offset;

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);
#endif

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_offset == 0) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			ret = -EINVAL;
			break;
		}

		/* Validate that the target is in a valid r/w GPU domain */
		if (reloc.write_domain & (reloc.write_domain - 1)) {
			DRM_ERROR("reloc with multiple write domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.read_domains,
				  reloc.write_domain);
			ret = -EINVAL;
			break;
		}
		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.read_domains,
				  reloc.write_domain);
			ret = -EINVAL;
			break;
		}
		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			ret = -EINVAL;
			break;
		}

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_offset == reloc.presumed_offset)
			continue;

		/* Check that the relocation address is valid... */
		if (reloc.offset > obj->base.size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->base.size);
			ret = -EINVAL;
			break;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			ret = -EINVAL;
			break;
		}

		/* and points to somewhere within the target object. */
		if (reloc.delta >= target_obj->size) {
			DRM_ERROR("Relocation beyond target object bounds: "
				  "obj %p target %d delta %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.delta, (int) target_obj->size);
			ret = -EINVAL;
			break;
		}

		reloc.delta += target_offset;
		if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
			char *vaddr;

			vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
			kunmap_atomic(vaddr);
		} else {
			uint32_t __iomem *reloc_entry;
			void __iomem *reloc_page;

			ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
			if (ret)
				break;

			/* Map the page containing the relocation we're going to perform. */
			reloc.offset += obj->gtt_offset;
			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
							      reloc.offset & PAGE_MASK);
			reloc_entry = (uint32_t __iomem *)
				(reloc_page + (reloc.offset & ~PAGE_MASK));
			iowrite32(reloc.delta, reloc_entry);
			io_mapping_unmap_atomic(reloc_page);
		}

		/* and update the user's relocation entry */
		reloc.presumed_offset = target_offset;
		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
					    &reloc.presumed_offset,
					    sizeof(reloc.presumed_offset))) {
			ret = -EFAULT;
			break;
		}
	}

	drm_gem_object_unreference(target_obj);
	return ret;
}
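
/*
 * Example (editor's sketch of the userspace side): to patch a batch that
 * references a target buffer at byte offset 0x40, userspace would fill in
 * roughly
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle   = target_handle,
 *		.offset          = 0x40,            // where to patch this obj
 *		.delta           = 0,               // offset inside the target
 *		.presumed_offset = last_gtt_offset, // last-known GTT address
 *		.read_domains    = I915_GEM_DOMAIN_RENDER,
 *		.write_domain    = 0,
 *	};
 *
 * If presumed_offset still matches the target's real GTT offset, the loop
 * above skips the rewrite entirely -- the common, fast case.
 */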
static int
i915_gem_execbuffer_pin(struct drm_device *dev,
			struct drm_file *file,
			struct drm_gem_object **object_list,
			struct drm_i915_gem_exec_object2 *exec_list,
			int count)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i, retry;

	/* attempt to pin all of the buffers into the GTT */
	for (retry = 0; retry < 2; retry++) {
		ret = 0;
		for (i = 0; i < count; i++) {
			struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
			struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
			bool need_fence =
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;

			/* Check fence reg constraints and rebind if necessary */
			if (need_fence &&
			    !i915_gem_object_fence_offset_ok(&obj->base,
							     obj->tiling_mode)) {
				ret = i915_gem_object_unbind(&obj->base);
				if (ret)
					break;
			}

			ret = i915_gem_object_pin(&obj->base, entry->alignment);
			if (ret)
				break;

			/*
			 * Pre-965 chips need a fence register set up in order
			 * to properly handle blits to/from tiled surfaces.
			 */
			if (need_fence) {
				ret = i915_gem_object_get_fence_reg(&obj->base, true);
				if (ret) {
					i915_gem_object_unpin(&obj->base);
					break;
				}

				dev_priv->fence_regs[obj->fence_reg].gpu = true;
			}

			entry->offset = obj->gtt_offset;
		}

		while (i--)
			i915_gem_object_unpin(object_list[i]);

		if (ret == 0)
			break;

		if (ret != -ENOSPC || retry)
			return ret;

		ret = i915_gem_evict_everything(dev);
		if (ret)
			return ret;
	}

	return 0;
}
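
/*
 * Note (editor's): the loop above is deliberately two-pass.  Pass one
 * optimistically binds every buffer at its required alignment; only if that
 * fails with -ENOSPC does i915_gem_evict_everything() empty the aperture
 * for a second and final attempt, so the common case never pays for an
 * eviction scan.
 */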
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	u32 seqno = 0;
	int ret;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = 0;
	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
		/* And wait for the seqno passing without holding any locks and
		 * causing extra latency for others. This is safe as the irq
		 * generation is designed to be run atomically and so is
		 * lockless.
		 */
		ring->user_irq_get(dev, ring);
		ret = wait_event_interruptible(ring->irq_queue,
					       i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
					       || atomic_read(&dev_priv->mm.wedged));
		ring->user_irq_put(dev, ring);

		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
			ret = -EIO;
	}

	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}
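
/*
 * Worked example (editor's sketch): with HZ=250, msecs_to_jiffies(20) is 5
 * jiffies.  The list walk runs oldest-first and remembers the newest request
 * emitted at least 20 ms ago; anything younger than recent_enough terminates
 * the scan.  Waiting on that single seqno therefore bounds the client to
 * roughly 20 ms of queued-but-incomplete rendering.
 */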
static int
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
			  uint64_t exec_offset)
{
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7)
		return -EINVAL;

	if (!exec_start)
		return -EINVAL;

	return 0;
}
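
/*
 * Example (editor's): a batch bound at GTT offset 0x10000 with
 * batch_start_offset 0x40 and batch_len 0x80 passes (start and length are
 * both 8-byte aligned, and exec_start is non-zero), while batch_len 0x84 is
 * rejected with -EINVAL because (exec_start | exec_len) & 0x7 is non-zero.
 */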
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);

		if (!access_ok(VERIFY_READ, ptr, length))
			return -EFAULT;

		/* we may also need to update the presumed offsets */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (fault_in_pages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec_list)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_request *request = NULL;
	int ret, i, flips;
	uint64_t exec_offset;
	struct intel_ring_buffer *ring = NULL;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	ret = validate_exec_list(exec_list, args->buffer_count);
	if (ret)
		return ret;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->render_ring;
		break;
	case I915_EXEC_BSD:
		if (!HAS_BSD(dev)) {
			DRM_ERROR("execbuf with invalid ring (BSD)\n");
			return -EINVAL;
		}
		ring = &dev_priv->bsd_ring;
		break;
	case I915_EXEC_BLT:
		if (!HAS_BLT(dev)) {
			DRM_ERROR("execbuf with invalid ring (BLT)\n");
			return -EINVAL;
		}
		ring = &dev_priv->blt_ring;
		break;
	default:
		DRM_ERROR("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
	if (object_list == NULL) {
		DRM_ERROR("Failed to allocate object list for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	if (args->num_cliprects != 0) {
		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		ret = copy_from_user(cliprects,
				     (struct drm_clip_rect __user *)
				     (uintptr_t) args->cliprects_ptr,
				     sizeof(*cliprects) * args->num_cliprects);
		if (ret != 0) {
			DRM_ERROR("copy %d cliprects failed: %d\n",
				  args->num_cliprects, ret);
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL) {
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			/* prevent error path from reading uninitialized data */
			args->buffer_count = i + 1;
			ret = -ENOENT;
			goto err;
		}

		obj_priv = to_intel_bo(object_list[i]);
		if (obj_priv->in_execbuffer) {
			DRM_ERROR("Object %p appears more than once in object list\n",
				  object_list[i]);
			/* prevent error path from reading uninitialized data */
			args->buffer_count = i + 1;
			ret = -EINVAL;
			goto err;
		}
		obj_priv->in_execbuffer = true;
	}

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = i915_gem_execbuffer_pin(dev, file,
				      object_list, exec_list,
				      args->buffer_count);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	if (batch_obj->pending_write_domain) {
		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* Sanity check the batch buffer */
	exec_offset = to_intel_bo(batch_obj)->gtt_offset;
	ret = i915_gem_check_execbuffer(args, exec_offset);
	if (ret != 0) {
		DRM_ERROR("execbuf with invalid offset/length\n");
		goto err;
	}

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;
	dev_priv->mm.flush_rings = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj, ring);
	}

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev, file,
			       dev->invalidate_domains,
			       dev->flush_domains,
			       dev_priv->mm.flush_rings);
	}

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		uint32_t old_write_domain = obj->write_domain;

		obj->write_domain = obj->pending_write_domain;
		trace_i915_gem_object_change_domain(obj,
						    obj->read_domains,
						    old_write_domain);
	}

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

#if WATCH_EXEC
	i915_gem_dump_object(batch_obj,
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Check for any pending flips. As we only maintain a flip queue depth
	 * of 1, we can simply insert a WAIT for the next display flip prior
	 * to executing the batch and avoid stalling the CPU.
	 */
	flips = 0;
	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]->write_domain)
			flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
	}
	if (flips) {
		int plane, flip_mask;

		for (plane = 0; flips >> plane; plane++) {
			if (((flips >> plane) & 1) == 0)
				continue;

			if (plane)
				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
			else
				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

			intel_ring_begin(dev, ring, 2);
			intel_ring_emit(dev, ring,
					MI_WAIT_FOR_EVENT | flip_mask);
			intel_ring_emit(dev, ring, MI_NOOP);
			intel_ring_advance(dev, ring);
		}
	}

	/* Exec the batchbuffer */
	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
					    cliprects, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	i915_retire_commands(dev, ring);

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, ring);
		if (obj->write_domain)
			list_move_tail(&to_intel_bo(obj)->gpu_write_list,
				       &ring->gpu_write_list);
	}

	i915_add_request(dev, file, request, ring);
	request = NULL;

err:
	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]) {
			obj_priv = to_intel_bo(object_list[i]);
			obj_priv->in_execbuffer = false;
		}
		drm_gem_object_unreference(object_list[i]);
	}

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	drm_free_large(object_list);
	kfree(cliprects);
	kfree(request);

	return ret;
}
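
/*
 * Summary (editor's): a single execbuffer call thus proceeds through five
 * phases under struct_mutex -- look up handles, pin into the GTT, apply
 * relocations, accumulate and emit domain flushes, then dispatch the batch
 * and queue a request -- and every failure funnels through the same err:
 * unwind, which clears in_execbuffer and drops the object references.
 */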
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_gem_exec_object __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;

	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_gem_exec_object2 __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_gem_exec_object2 __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
	WARN_ON(i915_verify_lists(dev));

	if (obj_priv->gtt_space != NULL) {
		if (alignment == 0)
			alignment = i915_gem_get_gtt_alignment(obj);
		if (obj_priv->gtt_offset & (alignment - 1)) {
			WARN(obj_priv->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%x, req.alignment=%x\n",
			     obj_priv->gtt_offset, alignment);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret)
			return ret;
	}

	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		i915_gem_info_add_pin(dev_priv, obj->size);
		if (!obj_priv->active)
			list_move_tail(&obj_priv->mm_list,
				       &dev_priv->mm.pinned_list);
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	WARN_ON(i915_verify_lists(dev));
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active)
			list_move_tail(&obj_priv->mm_list,
				       &dev_priv->mm.inactive_list);
		i915_gem_info_remove_pin(dev_priv, obj->size);
	}
	WARN_ON(i915_verify_lists(dev));
}
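
/*
 * Usage sketch (editor's, hypothetical caller): pin/unpin calls nest, so a
 * caller that needs a stable GTT address does roughly
 *
 *	ret = i915_gem_object_pin(obj, 4096);
 *	if (ret == 0) {
 *		... program hardware with to_intel_bo(obj)->gtt_offset ...
 *		i915_gem_object_unpin(obj);
 *	}
 *
 * and only the final unpin moves the object back to the inactive list.
 */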
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret)
			goto out;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	args->busy = obj_priv->active;
	if (args->busy) {
		/* Unconditionally flush objects, even when the gpu still uses this
		 * object. Userspace calling this function indicates that it wants to
		 * use this buffer rather sooner than later, so issuing the required
		 * flush earlier is beneficial.
		 */
		if (obj->write_domain & I915_GEM_GPU_DOMAINS)
			i915_gem_flush_ring(dev, file_priv,
					    obj_priv->ring,
					    0, obj->write_domain);

		/* Update the active list for the hardware's current position.
		 * Otherwise this only updates on a delayed timer or when irqs
		 * are actually unmasked, and our working set ends up being
		 * larger than required.
		 */
		i915_gem_retire_requests_ring(dev, obj_priv->ring);

		args->busy = obj_priv->active;
	}

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	if (obj_priv->pin_count) {
		ret = -EINVAL;
		goto out;
	}

	if (obj_priv->madv != __I915_MADV_PURGED)
		obj_priv->madv = args->madv;

	/* if the object is no longer bound, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj_priv) &&
	    obj_priv->gtt_space == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj_priv->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
struct drm_gem_object *i915_gem_alloc_object(struct drm_device *dev,
					     size_t size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	i915_gem_info_add_obj(dev_priv, size);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->agp_type = AGP_USER_MEMORY;
	obj->base.driver_private = NULL;
	obj->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj->mm_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;

	return &obj->base;
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	/* i915 allocates its objects via i915_gem_alloc_object(), so the
	 * generic DRM init hook must never be reached.
	 */
	BUG();

	return 0;
}
static void i915_gem_free_object_tail(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	ret = i915_gem_object_unbind(obj);
	if (ret == -ERESTARTSYS) {
		list_move(&obj_priv->mm_list,
			  &dev_priv->mm.deferred_free_list);
		return;
	}

	if (obj_priv->mmap_offset)
		i915_gem_free_mmap_offset(obj);

	drm_gem_object_release(obj);
	i915_gem_info_remove_obj(dev_priv, obj->size);

	kfree(obj_priv->page_cpu_valid);
	kfree(obj_priv->bit_17);
	kfree(obj_priv);
}

void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	trace_i915_gem_object_destroy(obj);

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_free_object_tail(obj);
}
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_gem_evict_inactive(dev);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* Hack! Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer_sync(&dev_priv->hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}
/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
static int
i915_gem_init_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret)
		goto err_unref;

	dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
	dev_priv->seqno_page = kmap(obj_priv->pages[0]);
	if (dev_priv->seqno_page == NULL) {
		/* Report the mapping failure instead of returning success
		 * with a NULL seqno page.
		 */
		ret = -ENOMEM;
		goto err_unpin;
	}

	dev_priv->seqno_obj = obj;
	memset(dev_priv->seqno_page, 0, PAGE_SIZE);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}
static void
i915_gem_cleanup_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = dev_priv->seqno_obj;
	obj_priv = to_intel_bo(obj);
	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->seqno_obj = NULL;

	dev_priv->seqno_page = NULL;
}
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (HAS_PIPE_CONTROL(dev)) {
		ret = i915_gem_init_pipe_control(dev);
		if (ret)
			return ret;
	}

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		goto cleanup_pipe_control;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	dev_priv->next_seqno = 1;

	return 0;

cleanup_bsd_ring:
	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
cleanup_render_ring:
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
cleanup_pipe_control:
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
	return ret;
}
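
/*
 * Note (editor's): the labels above unwind in strict reverse order of
 * initialization -- a blt failure tears down the bsd ring, a bsd failure
 * tears down the render ring, and a render failure releases the
 * pipe-control page -- the usual kernel goto-ladder pattern for
 * multi-step setup.
 */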
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
	BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
	BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
	BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
	BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
}

void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
	init_ring_lists(&dev_priv->render_ring);
	init_ring_lists(&dev_priv->bsd_ring);
	init_ring_lists(&dev_priv->blt_ring);
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_completion(&dev_priv->error_completion);
	spin_lock(&shrink_list_lock);
	list_add(&dev_priv->mm.shrink_list, &shrink_list);
	spin_unlock(&shrink_list_lock);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
		/* fall through: gen3 also has the eight gen2 registers */
	case 2:
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		break;
	}
	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);
}
/*
 * Create a physically contiguous memory object for this object,
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = to_intel_bo(obj);
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i]);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

	i915_gem_object_put_pages(obj);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj,
			    int id,
			    int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = to_intel_bo(obj);

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i]);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);
	}

	i915_gem_object_put_pages(obj);

	return 0;
out:
	return ret;
}
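
/*
 * Usage sketch (editor's, hypothetical caller): display code attaches a
 * small object -- e.g. a cursor image, per the comment above
 * i915_gem_init_phys_object() -- to contiguous backing store with
 *
 *	ret = i915_gem_attach_phys_object(dev, obj,
 *					  I915_GEM_PHYS_CURSOR_0, align);
 *
 * after which CPU writes land via i915_gem_phys_pwrite() below and the
 * hardware scans the physical pages directly.
 */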
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
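
/* The GPU counts as active while the flushing list or any ring's active list is non-empty. */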
static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->render_ring.active_list) &&
		      list_empty(&dev_priv->bsd_ring.active_list) &&
		      list_empty(&dev_priv->blt_ring.active_list);

	return !lists_empty;
}
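
/*
 * Shrinker callback. When nr_to_scan is zero the VM is only asking how
 * many objects we could free, so report the inactive-list population.
 * Otherwise retire completed requests and unbind inactive objects,
 * purgeable ones first, until the target is met; as a last resort, idle
 * the GPU and rescan. Returns -1 if every device's struct_mutex was
 * contended and shrinking here could deadlock.
 */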
static int
i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    mm_list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

rescan:
	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);
		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 mm_list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				i915_gem_object_unbind(&obj_priv->base);
				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 mm_list) {
			if (nr_to_scan > 0) {
				i915_gem_object_unbind(&obj_priv->base);
				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;
	}

	if (nr_to_scan) {
		int active = 0;

		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This dramatically reduces the number of OOM-killer
		 * events whilst running the GPU aggressively.
		 */
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (!mutex_trylock(&dev->struct_mutex))
				continue;

			spin_unlock(&shrink_list_lock);

			if (i915_gpu_is_active(dev)) {
				i915_gpu_idle(dev);
				active++;
			}

			spin_lock(&shrink_list_lock);
			mutex_unlock(&dev->struct_mutex);
		}

		if (active)
			goto rescan;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;
	else if (cnt > 0)
		return (cnt / 100) * sysctl_vfs_cache_pressure;
	else
		return 0;
}
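
/*
 * Register the shrinker with the core VM at module init so that
 * i915_gem_shrink() is invoked under memory pressure, and drop the
 * registration again on module exit.
 */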
static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};

void __init
i915_gem_shrinker_init(void)
{
	register_shrinker(&shrinker);
}

void __exit
i915_gem_shrinker_exit(void)
{
	unregister_shrinker(&shrinker);
}