/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>
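
/* Everything except the two CPU-visible domains (CPU and GTT); the
 * set-domain ioctl below rejects any request that names one of these.
 */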
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
                                             int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                                      uint64_t offset,
                                                      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev, int min_size);
static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (start >= end ||
            (start & (PAGE_SIZE - 1)) != 0 ||
            (end & (PAGE_SIZE - 1)) != 0) {
                return -EINVAL;
        }

        drm_mm_init(&dev_priv->mm.gtt_space, start,
                    end - start);

        dev->gtt_total = (uint32_t) (end - start);

        return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_init *args = data;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_i915_gem_get_aperture *args = data;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        args->aper_size = dev->gtt_total;
        args->aper_available_size = (args->aper_size -
                                     atomic_read(&dev->pin_memory));

        return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        args->size = roundup(args->size, PAGE_SIZE);

        /* Allocate the new object */
        obj = drm_gem_object_alloc(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_handle_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}
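
/*
 * Illustrative userspace sketch (not part of the driver): creating a buffer
 * object through the ioctl above, assuming an open DRM fd, the libdrm
 * drmIoctl() wrapper, and the uapi definitions from i915_drm.h.  Includes
 * and error handling are trimmed; the kernel rounds the requested size up
 * to a page multiple before allocating.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		printf("bo handle %u, size %llu\n", create.handle,
 *		       (unsigned long long)create.size);
 */
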
static inline int
fast_shmem_read(struct page **pages,
                loff_t page_base, int page_offset,
                char __user *data,
                int length)
{
        char __iomem *vaddr;
        int unwritten;

        vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
        if (vaddr == NULL)
                return -ENOMEM;
        unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
        kunmap_atomic(vaddr, KM_USER0);

        if (unwritten)
                return -EFAULT;

        return 0;
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
        drm_i915_private_t *dev_priv = obj->dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj_priv->tiling_mode != I915_TILING_NONE;
}

static inline int
slow_shmem_copy(struct page *dst_page,
                int dst_offset,
                struct page *src_page,
                int src_offset,
                int length)
{
        char *dst_vaddr, *src_vaddr;

        dst_vaddr = kmap_atomic(dst_page, KM_USER0);
        if (dst_vaddr == NULL)
                return -ENOMEM;

        src_vaddr = kmap_atomic(src_page, KM_USER1);
        if (src_vaddr == NULL) {
                kunmap_atomic(dst_vaddr, KM_USER0);
                return -ENOMEM;
        }

        memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

        kunmap_atomic(src_vaddr, KM_USER1);
        kunmap_atomic(dst_vaddr, KM_USER0);

        return 0;
}

static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
                      int gpu_offset,
                      struct page *cpu_page,
                      int cpu_offset,
                      int length,
                      int is_read)
{
        char *gpu_vaddr, *cpu_vaddr;

        /* Use the unswizzled path if this page isn't affected. */
        if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
                if (is_read)
                        return slow_shmem_copy(cpu_page, cpu_offset,
                                               gpu_page, gpu_offset, length);
                else
                        return slow_shmem_copy(gpu_page, gpu_offset,
                                               cpu_page, cpu_offset, length);
        }

        gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
        if (gpu_vaddr == NULL)
                return -ENOMEM;

        cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
        if (cpu_vaddr == NULL) {
                kunmap_atomic(gpu_vaddr, KM_USER0);
                return -ENOMEM;
        }

        /* Copy the data, XORing A6 with A17 (1). The user already knows he's
         * XORing with the other bits (A9 for Y, A9 and A10 for X)
         */
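        /* Worked example of the swizzle below: on a page whose physical
         * address has bit 17 set, gpu_offset ^ 64 flips address bit 6, so
         * the GPU page is accessed at 0x40-0x7f when the logical offset is
         * 0x00-0x3f, and vice versa.  Limiting each memcpy to the current
         * 64-byte cacheline (via cacheline_end) keeps every chunk within a
         * single swizzled cacheline.
         */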
        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                if (is_read) {
                        memcpy(cpu_vaddr + cpu_offset,
                               gpu_vaddr + swizzled_gpu_offset,
                               this_length);
                } else {
                        memcpy(gpu_vaddr + swizzled_gpu_offset,
                               cpu_vaddr + cpu_offset,
                               this_length);
                }
                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        kunmap_atomic(cpu_vaddr, KM_USER1);
        kunmap_atomic(gpu_vaddr, KM_USER0);

        return 0;
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages(obj);
        if (ret != 0)
                goto fail_unlock;

        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                        args->size);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = obj->driver_private;
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within the object
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_shmem_read(obj_priv->pages,
                                      page_base, page_offset,
                                      user_data, page_length);
                if (ret)
                        goto fail_put_pages;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static inline gfp_t
i915_gem_object_get_page_gfp_mask(struct drm_gem_object *obj)
{
        return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
}

static inline void
i915_gem_object_set_page_gfp_mask(struct drm_gem_object *obj, gfp_t gfp)
{
        mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
}

static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
        int ret;

        ret = i915_gem_object_get_pages(obj);

        /* If we've insufficient memory to map in the pages, attempt
         * to make some space by throwing out some old buffers.
         */
        if (ret == -ENOMEM) {
                struct drm_device *dev = obj->dev;
                gfp_t gfp;

                ret = i915_gem_evict_something(dev, obj->size);
                if (ret)
                        return ret;

                gfp = i915_gem_object_get_page_gfp_mask(obj);
                i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
                ret = i915_gem_object_get_pages(obj);
                i915_gem_object_set_page_gfp_mask(obj, gfp);
        }

        return ret;
}

/**
 * This is the fallback shmem pread path, which uses get_user_pages to pin
 * the user memory outside of the struct_mutex, so we can copy out of the
 * object's backing pages while holding the struct mutex and not take page
 * faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
        loff_t offset, pinned_pages, i;
        loff_t first_data_page, last_data_page, num_pages;
        int shmem_page_index, shmem_page_offset;
        int data_page_index, data_page_offset;
        int page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;
        int do_bit17_swizzling;

        remain = args->size;

        /* Pin the user pages containing the data. We can't fault while
         * holding the struct mutex, yet we want to hold it while
         * dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 1, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto fail_put_user_pages;
        }

        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
                goto fail_unlock;

        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                        args->size);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = obj->driver_private;
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * shmem_page_index = page number within shmem file
                 * shmem_page_offset = offset within page in shmem file
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within data_page_index page
                 * page_length = bytes to copy for this page
                 */
                shmem_page_index = offset / PAGE_SIZE;
                shmem_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                if (do_bit17_swizzling) {
                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                                    shmem_page_offset,
                                                    user_pages[data_page_index],
                                                    data_page_offset,
                                                    page_length,
                                                    1);
                } else {
                        ret = slow_shmem_copy(user_pages[data_page_index],
                                              data_page_offset,
                                              obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              page_length);
                }
                if (ret)
                        goto fail_put_pages;

                remain -= page_length;
                data_ptr += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
        for (i = 0; i < pinned_pages; i++) {
                SetPageDirty(user_pages[i]);
                page_cache_release(user_pages[i]);
        }
        drm_free_large(user_pages);

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check source.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        if (i915_gem_object_needs_bit17_swizzle(obj)) {
                ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
        } else {
                ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
                if (ret != 0)
                        ret = i915_gem_shmem_pread_slow(dev, obj, args,
                                                        file_priv);
        }

        drm_gem_object_unreference(obj);

        return ret;
}
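
/*
 * Illustrative userspace sketch (not part of the driver): reading the first
 * bytes of a buffer object back through the pread ioctl above.  Assumes an
 * open DRM fd, a valid handle, libdrm's drmIoctl() and the uapi definitions
 * from i915_drm.h; includes and error handling trimmed.
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
 *		perror("pread");
 */
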
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        char *vaddr_atomic;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        if (unwritten)
                return -EFAULT;
        return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
                  loff_t gtt_base, int gtt_offset,
                  struct page *user_page, int user_offset,
                  int length)
{
        char *src_vaddr, *dst_vaddr;
        unsigned long unwritten;

        dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
        src_vaddr = kmap_atomic(user_page, KM_USER1);
        unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
                                                      src_vaddr + user_offset,
                                                      length);
        kunmap_atomic(src_vaddr, KM_USER1);
        io_mapping_unmap_atomic(dst_vaddr);
        if (unwritten)
                return -EFAULT;
        return 0;
}

static inline int
fast_shmem_write(struct page **pages,
                 loff_t page_base, int page_offset,
                 char __user *data,
                 int length)
{
        char __iomem *vaddr;
        unsigned long unwritten;

        vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
        if (vaddr == NULL)
                return -ENOMEM;
        unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
        kunmap_atomic(vaddr, KM_USER0);

        if (unwritten)
                return -EFAULT;
        return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
        if (!access_ok(VERIFY_READ, user_data, remain))
                return -EFAULT;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
                goto fail;

        obj_priv = obj->driver_private;
        offset = obj_priv->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                                      page_offset, user_data, page_length);

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. Return the error and we'll
                 * retry in the slow path.
                 */
                if (ret)
                        goto fail;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail:
        i915_gem_object_unpin(obj);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t gtt_page_base, offset;
        loff_t first_data_page, last_data_page, num_pages;
        loff_t pinned_pages, i;
        struct page **user_pages;
        struct mm_struct *mm = current->mm;
        int gtt_page_offset, data_page_offset, data_page_index, page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;

        remain = args->size;

        /* Pin the user pages containing the data. We can't fault while
         * holding the struct mutex, and all of the pwrite implementations
         * want to hold it while dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto out_unpin_pages;
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret)
                goto out_unlock;

        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
                goto out_unpin_object;

        obj_priv = obj->driver_private;
        offset = obj_priv->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * gtt_page_base = page offset within aperture
                 * gtt_page_offset = offset within page in aperture
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within data_page_index page
                 * page_length = bytes to copy for this page
                 */
                gtt_page_base = offset & PAGE_MASK;
                gtt_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((gtt_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - gtt_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
                                        gtt_page_base, gtt_page_offset,
                                        user_pages[data_page_index],
                                        data_page_offset,
                                        page_length);

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. Return the error; there is no
                 * further fallback from this path.
                 */
                if (ret)
                        goto out_unpin_object;

                remain -= page_length;
                offset += page_length;
                data_ptr += page_length;
        }

out_unpin_object:
        i915_gem_object_unpin(obj);
out_unlock:
        mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
        drm_free_large(user_pages);

        return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages(obj);
        if (ret != 0)
                goto fail_unlock;

        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = obj->driver_private;
        offset = args->offset;
        obj_priv->dirty = 1;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within the object
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_shmem_write(obj_priv->pages,
                                       page_base, page_offset,
                                       user_data, page_length);
                if (ret)
                        goto fail_put_pages;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
        loff_t offset, pinned_pages, i;
        loff_t first_data_page, last_data_page, num_pages;
        int shmem_page_index, shmem_page_offset;
        int data_page_index, data_page_offset;
        int page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;
        int do_bit17_swizzling;

        remain = args->size;

        /* Pin the user pages containing the data. We can't fault while
         * holding the struct mutex, and all of the pwrite implementations
         * want to hold it while dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto fail_put_user_pages;
        }

        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
                goto fail_unlock;

        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = obj->driver_private;
        offset = args->offset;
        obj_priv->dirty = 1;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * shmem_page_index = page number within shmem file
                 * shmem_page_offset = offset within page in shmem file
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within data_page_index page
                 * page_length = bytes to copy for this page
                 */
                shmem_page_index = offset / PAGE_SIZE;
                shmem_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                if (do_bit17_swizzling) {
                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                                    shmem_page_offset,
                                                    user_pages[data_page_index],
                                                    data_page_offset,
                                                    page_length,
                                                    0);
                } else {
                        ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
                                              page_length);
                }
                if (ret)
                        goto fail_put_pages;

                remain -= page_length;
                data_ptr += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
        drm_free_large(user_pages);

        return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check destination.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
                 dev->gtt_total != 0) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
                                                       file_priv);
                }
        } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
                ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
        } else {
                ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
                                                         file_priv);
                }
        }

#if WATCH_PWRITE
        if (ret)
                DRM_INFO("pwrite failed %d\n", ret);
#endif

        drm_gem_object_unreference(obj);

        return ret;
}
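
/*
 * Illustrative userspace sketch (not part of the driver): uploading data
 * into a buffer object through the pwrite ioctl above; the kernel picks the
 * GTT or shmem path internally as described above.  Assumes an open DRM fd,
 * a valid handle, libdrm's drmIoctl() and the uapi definitions from
 * i915_drm.h; includes and error handling trimmed.
 *
 *	char data[4096] = "hello";
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(data),
 *		.data_ptr = (uintptr_t)data,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
 *		perror("pwrite");
 */
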
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        /* Only handle setting domains to types used by the CPU. */
        if (write_domain & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        if (read_domains & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        /* Having something in the write domain implies it's in the read
         * domain, and only that read domain. Enforce that in the request.
         */
        if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        mutex_lock(&dev->struct_mutex);

        intel_mark_busy(dev, obj);

#if WATCH_BUF
        DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
                 obj, obj->size, read_domains, write_domain);
#endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

                /* Update the LRU on the fence for the CPU access that's
                 * about to occur.
                 */
                if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
                        list_move_tail(&obj_priv->fence_list,
                                       &dev_priv->mm.fence_list);
                }

                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
                 */
                if (ret == -EINVAL)
                        ret = 0;
        } else {
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
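
/*
 * Illustrative userspace sketch (not part of the driver): moving an object
 * into the GTT read/write domain before touching a GTT mapping of it, as
 * described in the comment above.  Assumes an open DRM fd, a valid handle,
 * libdrm's drmIoctl() and the uapi definitions from i915_drm.h.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle       = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
 *		perror("set_domain");
 */
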
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -EBADF;
        }

#if WATCH_BUF
        DRM_INFO("%s: sw_finish %d (%p %zd)\n",
                 __func__, args->handle, obj, obj->size);
#endif
        obj_priv = obj->driver_private;

        /* Pinned buffers may be scanout, so flush the cache */
        if (obj_priv->pin_count)
                i915_gem_object_flush_cpu_write_domain(obj);

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        loff_t offset;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        offset = args->offset;

        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        up_write(&current->mm->mmap_sem);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR((void *)addr))
                return addr;

        args->addr_ptr = (uint64_t) addr;

        return 0;
}
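
/*
 * Illustrative userspace sketch (not part of the driver): mapping an
 * object's shmem backing store through the ioctl above and writing to it.
 * Assumes an open DRM fd, a valid handle, libdrm's drmIoctl() and the uapi
 * definitions from i915_drm.h; the returned addr_ptr is an ordinary CPU
 * pointer.
 *
 *	struct drm_i915_gem_mmap map = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size   = 4096,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &map) == 0)
 *		memset((void *)(uintptr_t)map.addr_ptr, 0, map.size);
 */
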
  988. /**
  989. * i915_gem_fault - fault a page into the GTT
  990. * vma: VMA in question
  991. * vmf: fault info
  992. *
  993. * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
  994. * from userspace. The fault handler takes care of binding the object to
  995. * the GTT (if needed), allocating and programming a fence register (again,
  996. * only if needed based on whether the old reg is still valid or the object
  997. * is tiled) and inserting a new PTE into the faulting process.
  998. *
  999. * Note that the faulting process may involve evicting existing objects
  1000. * from the GTT and/or fence registers to make room. So performance may
  1001. * suffer if the GTT working set is large or there are few fence registers
  1002. * left.
  1003. */
  1004. int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1005. {
  1006. struct drm_gem_object *obj = vma->vm_private_data;
  1007. struct drm_device *dev = obj->dev;
  1008. struct drm_i915_private *dev_priv = dev->dev_private;
  1009. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1010. pgoff_t page_offset;
  1011. unsigned long pfn;
  1012. int ret = 0;
  1013. bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
  1014. /* We don't use vmf->pgoff since that has the fake offset */
  1015. page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
  1016. PAGE_SHIFT;
  1017. /* Now bind it into the GTT if needed */
  1018. mutex_lock(&dev->struct_mutex);
  1019. if (!obj_priv->gtt_space) {
  1020. ret = i915_gem_object_bind_to_gtt(obj, 0);
  1021. if (ret) {
  1022. mutex_unlock(&dev->struct_mutex);
  1023. return VM_FAULT_SIGBUS;
  1024. }
  1025. list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  1026. ret = i915_gem_object_set_to_gtt_domain(obj, write);
  1027. if (ret) {
  1028. mutex_unlock(&dev->struct_mutex);
  1029. return VM_FAULT_SIGBUS;
  1030. }
  1031. }
  1032. /* Need a new fence register? */
  1033. if (obj_priv->tiling_mode != I915_TILING_NONE) {
  1034. ret = i915_gem_object_get_fence_reg(obj);
  1035. if (ret) {
  1036. mutex_unlock(&dev->struct_mutex);
  1037. return VM_FAULT_SIGBUS;
  1038. }
  1039. }
  1040. pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
  1041. page_offset;
  1042. /* Finally, remap it using the new GTT offset */
  1043. ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
  1044. mutex_unlock(&dev->struct_mutex);
  1045. switch (ret) {
  1046. case -ENOMEM:
  1047. case -EAGAIN:
  1048. return VM_FAULT_OOM;
  1049. case -EFAULT:
  1050. case -EINVAL:
  1051. return VM_FAULT_SIGBUS;
  1052. default:
  1053. return VM_FAULT_NOPAGE;
  1054. }
  1055. }
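/* Note on the switch above: vm_insert_pfn() returns 0 on success and may
 * return -EBUSY if a racing fault already populated the PTE; both fall
 * through to the default case and report VM_FAULT_NOPAGE, while allocation
 * failures and bad addresses are translated to VM_FAULT_OOM/SIGBUS.
 */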
  1056. /**
  1057. * i915_gem_create_mmap_offset - create a fake mmap offset for an object
  1058. * @obj: obj in question
  1059. *
  1060. * GEM memory mapping works by handing back to userspace a fake mmap offset
  1061. * it can use in a subsequent mmap(2) call. The DRM core code then looks
  1062. * up the object based on the offset and sets up the various memory mapping
  1063. * structures.
  1064. *
  1065. * This routine allocates and attaches a fake offset for @obj.
  1066. */
  1067. static int
  1068. i915_gem_create_mmap_offset(struct drm_gem_object *obj)
  1069. {
  1070. struct drm_device *dev = obj->dev;
  1071. struct drm_gem_mm *mm = dev->mm_private;
  1072. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1073. struct drm_map_list *list;
  1074. struct drm_local_map *map;
  1075. int ret = 0;
  1076. /* Set the object up for mmap'ing */
  1077. list = &obj->map_list;
  1078. list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
  1079. if (!list->map)
  1080. return -ENOMEM;
  1081. map = list->map;
  1082. map->type = _DRM_GEM;
  1083. map->size = obj->size;
  1084. map->handle = obj;
  1085. /* Get a DRM GEM mmap offset allocated... */
  1086. list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
  1087. obj->size / PAGE_SIZE, 0, 0);
  1088. if (!list->file_offset_node) {
  1089. DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
  1090. ret = -ENOMEM;
  1091. goto out_free_list;
  1092. }
  1093. list->file_offset_node = drm_mm_get_block(list->file_offset_node,
  1094. obj->size / PAGE_SIZE, 0);
  1095. if (!list->file_offset_node) {
  1096. ret = -ENOMEM;
  1097. goto out_free_list;
  1098. }
  1099. list->hash.key = list->file_offset_node->start;
  1100. if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
  1101. DRM_ERROR("failed to add to map hash\n");
  1102. goto out_free_mm;
  1103. }
  1104. /* By now we should be all set, any drm_mmap request on the offset
  1105. * below will get to our mmap & fault handler */
  1106. obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
  1107. return 0;
  1108. out_free_mm:
  1109. drm_mm_put_block(list->file_offset_node);
  1110. out_free_list:
  1111. kfree(list->map);
  1112. return ret;
  1113. }
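/* The fake offset handed back to userspace is simply the drm_mm node's start
 * (in pages) shifted up by PAGE_SHIFT; drm_gem_mmap() later hashes the page
 * offset of the mmap(2) call through mm->offset_hash to find this map_list
 * entry and, from it, the GEM object to fault in.
 */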
  1114. /**
  1115. * i915_gem_release_mmap - remove physical page mappings
  1116. * @obj: obj in question
  1117. *
1118. * Preserve the reservation of the mapping with the DRM core code, but
  1119. * relinquish ownership of the pages back to the system.
  1120. *
  1121. * It is vital that we remove the page mapping if we have mapped a tiled
  1122. * object through the GTT and then lose the fence register due to
  1123. * resource pressure. Similarly if the object has been moved out of the
1124. * aperture, then pages mapped into userspace must be revoked. Removing the
  1125. * mapping will then trigger a page fault on the next user access, allowing
  1126. * fixup by i915_gem_fault().
  1127. */
  1128. void
  1129. i915_gem_release_mmap(struct drm_gem_object *obj)
  1130. {
  1131. struct drm_device *dev = obj->dev;
  1132. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1133. if (dev->dev_mapping)
  1134. unmap_mapping_range(dev->dev_mapping,
  1135. obj_priv->mmap_offset, obj->size, 1);
  1136. }
  1137. static void
  1138. i915_gem_free_mmap_offset(struct drm_gem_object *obj)
  1139. {
  1140. struct drm_device *dev = obj->dev;
  1141. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1142. struct drm_gem_mm *mm = dev->mm_private;
  1143. struct drm_map_list *list;
  1144. list = &obj->map_list;
  1145. drm_ht_remove_item(&mm->offset_hash, &list->hash);
  1146. if (list->file_offset_node) {
  1147. drm_mm_put_block(list->file_offset_node);
  1148. list->file_offset_node = NULL;
  1149. }
  1150. if (list->map) {
  1151. kfree(list->map);
  1152. list->map = NULL;
  1153. }
  1154. obj_priv->mmap_offset = 0;
  1155. }
  1156. /**
  1157. * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  1158. * @obj: object to check
  1159. *
  1160. * Return the required GTT alignment for an object, taking into account
  1161. * potential fence register mapping if needed.
  1162. */
  1163. static uint32_t
  1164. i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
  1165. {
  1166. struct drm_device *dev = obj->dev;
  1167. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1168. int start, i;
  1169. /*
  1170. * Minimum alignment is 4k (GTT page size), but might be greater
  1171. * if a fence register is needed for the object.
  1172. */
  1173. if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
  1174. return 4096;
  1175. /*
  1176. * Previous chips need to be aligned to the size of the smallest
  1177. * fence register that can contain the object.
  1178. */
  1179. if (IS_I9XX(dev))
  1180. start = 1024*1024;
  1181. else
  1182. start = 512*1024;
  1183. for (i = start; i < obj->size; i <<= 1)
  1184. ;
  1185. return i;
  1186. }
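/* Illustrative arithmetic: on pre-965 9xx hardware fence regions start at 1MB,
 * so a 3MB tiled object walks 1MB -> 2MB -> 4MB and returns a 4MB alignment,
 * while a 768KB tiled object returns 1MB; untiled (or 965+) objects only need
 * the 4KB GTT page alignment.
 */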
  1187. /**
  1188. * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  1189. * @dev: DRM device
  1190. * @data: GTT mapping ioctl data
  1191. * @file_priv: GEM object info
  1192. *
  1193. * Simply returns the fake offset to userspace so it can mmap it.
  1194. * The mmap call will end up in drm_gem_mmap(), which will set things
  1195. * up so we can get faults in the handler above.
  1196. *
  1197. * The fault handler will take care of binding the object into the GTT
  1198. * (since it may have been evicted to make room for something), allocating
  1199. * a fence register, and mapping the appropriate aperture address into
  1200. * userspace.
  1201. */
  1202. int
  1203. i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
  1204. struct drm_file *file_priv)
  1205. {
  1206. struct drm_i915_gem_mmap_gtt *args = data;
  1207. struct drm_i915_private *dev_priv = dev->dev_private;
  1208. struct drm_gem_object *obj;
  1209. struct drm_i915_gem_object *obj_priv;
  1210. int ret;
  1211. if (!(dev->driver->driver_features & DRIVER_GEM))
  1212. return -ENODEV;
  1213. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  1214. if (obj == NULL)
  1215. return -EBADF;
  1216. mutex_lock(&dev->struct_mutex);
  1217. obj_priv = obj->driver_private;
  1218. if (!obj_priv->mmap_offset) {
  1219. ret = i915_gem_create_mmap_offset(obj);
  1220. if (ret) {
  1221. drm_gem_object_unreference(obj);
  1222. mutex_unlock(&dev->struct_mutex);
  1223. return ret;
  1224. }
  1225. }
  1226. args->offset = obj_priv->mmap_offset;
  1227. /*
  1228. * Pull it into the GTT so that we have a page list (makes the
  1229. * initial fault faster and any subsequent flushing possible).
  1230. */
  1231. if (!obj_priv->agp_mem) {
  1232. ret = i915_gem_object_bind_to_gtt(obj, 0);
  1233. if (ret) {
  1234. drm_gem_object_unreference(obj);
  1235. mutex_unlock(&dev->struct_mutex);
  1236. return ret;
  1237. }
  1238. list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  1239. }
  1240. drm_gem_object_unreference(obj);
  1241. mutex_unlock(&dev->struct_mutex);
  1242. return 0;
  1243. }
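/* Illustrative userspace flow (hypothetical, assuming the standard libdrm
 * wrappers): call ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) to obtain
 * arg.offset, then mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
 * arg.offset). The resulting VMA is backed by i915_gem_fault(), so accesses
 * go through the GTT aperture with any required fence register applied.
 */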
  1244. void
  1245. i915_gem_object_put_pages(struct drm_gem_object *obj)
  1246. {
  1247. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1248. int page_count = obj->size / PAGE_SIZE;
  1249. int i;
  1250. BUG_ON(obj_priv->pages_refcount == 0);
  1251. BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
  1252. if (--obj_priv->pages_refcount != 0)
  1253. return;
  1254. if (obj_priv->tiling_mode != I915_TILING_NONE)
  1255. i915_gem_object_save_bit_17_swizzle(obj);
  1256. if (obj_priv->madv == I915_MADV_DONTNEED)
  1257. obj_priv->dirty = 0;
  1258. for (i = 0; i < page_count; i++) {
  1259. if (obj_priv->pages[i] == NULL)
  1260. break;
  1261. if (obj_priv->dirty)
  1262. set_page_dirty(obj_priv->pages[i]);
  1263. if (obj_priv->madv == I915_MADV_WILLNEED)
  1264. mark_page_accessed(obj_priv->pages[i]);
  1265. page_cache_release(obj_priv->pages[i]);
  1266. }
  1267. obj_priv->dirty = 0;
  1268. drm_free_large(obj_priv->pages);
  1269. obj_priv->pages = NULL;
  1270. }
  1271. static void
  1272. i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
  1273. {
  1274. struct drm_device *dev = obj->dev;
  1275. drm_i915_private_t *dev_priv = dev->dev_private;
  1276. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1277. /* Add a reference if we're newly entering the active list. */
  1278. if (!obj_priv->active) {
  1279. drm_gem_object_reference(obj);
  1280. obj_priv->active = 1;
  1281. }
  1282. /* Move from whatever list we were on to the tail of execution. */
  1283. spin_lock(&dev_priv->mm.active_list_lock);
  1284. list_move_tail(&obj_priv->list,
  1285. &dev_priv->mm.active_list);
  1286. spin_unlock(&dev_priv->mm.active_list_lock);
  1287. obj_priv->last_rendering_seqno = seqno;
  1288. }
  1289. static void
  1290. i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
  1291. {
  1292. struct drm_device *dev = obj->dev;
  1293. drm_i915_private_t *dev_priv = dev->dev_private;
  1294. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1295. BUG_ON(!obj_priv->active);
  1296. list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
  1297. obj_priv->last_rendering_seqno = 0;
  1298. }
  1299. /* Immediately discard the backing storage */
  1300. static void
  1301. i915_gem_object_truncate(struct drm_gem_object *obj)
  1302. {
  1303. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1304. struct inode *inode;
  1305. inode = obj->filp->f_path.dentry->d_inode;
  1306. if (inode->i_op->truncate)
1307. inode->i_op->truncate(inode);
  1308. obj_priv->madv = __I915_MADV_PURGED;
  1309. }
  1310. static inline int
  1311. i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
  1312. {
  1313. return obj_priv->madv == I915_MADV_DONTNEED;
  1314. }
  1315. static void
  1316. i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
  1317. {
  1318. struct drm_device *dev = obj->dev;
  1319. drm_i915_private_t *dev_priv = dev->dev_private;
  1320. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1321. i915_verify_inactive(dev, __FILE__, __LINE__);
  1322. if (obj_priv->pin_count != 0)
  1323. list_del_init(&obj_priv->list);
  1324. else
  1325. list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  1326. obj_priv->last_rendering_seqno = 0;
  1327. if (obj_priv->active) {
  1328. obj_priv->active = 0;
  1329. drm_gem_object_unreference(obj);
  1330. }
  1331. i915_verify_inactive(dev, __FILE__, __LINE__);
  1332. }
  1333. /**
  1334. * Creates a new sequence number, emitting a write of it to the status page
  1335. * plus an interrupt, which will trigger i915_user_interrupt_handler.
  1336. *
1337. * Must be called with dev->struct_mutex held.
  1338. *
  1339. * Returned sequence numbers are nonzero on success.
  1340. */
  1341. static uint32_t
  1342. i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  1343. uint32_t flush_domains)
  1344. {
  1345. drm_i915_private_t *dev_priv = dev->dev_private;
  1346. struct drm_i915_file_private *i915_file_priv = NULL;
  1347. struct drm_i915_gem_request *request;
  1348. uint32_t seqno;
  1349. int was_empty;
  1350. RING_LOCALS;
  1351. if (file_priv != NULL)
  1352. i915_file_priv = file_priv->driver_priv;
  1353. request = kzalloc(sizeof(*request), GFP_KERNEL);
  1354. if (request == NULL)
  1355. return 0;
  1356. /* Grab the seqno we're going to make this request be, and bump the
  1357. * next (skipping 0 so it can be the reserved no-seqno value).
  1358. */
  1359. seqno = dev_priv->mm.next_gem_seqno;
  1360. dev_priv->mm.next_gem_seqno++;
  1361. if (dev_priv->mm.next_gem_seqno == 0)
  1362. dev_priv->mm.next_gem_seqno++;
  1363. BEGIN_LP_RING(4);
  1364. OUT_RING(MI_STORE_DWORD_INDEX);
  1365. OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
  1366. OUT_RING(seqno);
  1367. OUT_RING(MI_USER_INTERRUPT);
  1368. ADVANCE_LP_RING();
  1369. DRM_DEBUG("%d\n", seqno);
  1370. request->seqno = seqno;
  1371. request->emitted_jiffies = jiffies;
  1372. was_empty = list_empty(&dev_priv->mm.request_list);
  1373. list_add_tail(&request->list, &dev_priv->mm.request_list);
  1374. if (i915_file_priv) {
  1375. list_add_tail(&request->client_list,
  1376. &i915_file_priv->mm.request_list);
  1377. } else {
  1378. INIT_LIST_HEAD(&request->client_list);
  1379. }
  1380. /* Associate any objects on the flushing list matching the write
  1381. * domain we're flushing with our flush.
  1382. */
  1383. if (flush_domains != 0) {
  1384. struct drm_i915_gem_object *obj_priv, *next;
  1385. list_for_each_entry_safe(obj_priv, next,
  1386. &dev_priv->mm.flushing_list, list) {
  1387. struct drm_gem_object *obj = obj_priv->obj;
  1388. if ((obj->write_domain & flush_domains) ==
  1389. obj->write_domain) {
  1390. uint32_t old_write_domain = obj->write_domain;
  1391. obj->write_domain = 0;
  1392. i915_gem_object_move_to_active(obj, seqno);
  1393. trace_i915_gem_object_change_domain(obj,
  1394. obj->read_domains,
  1395. old_write_domain);
  1396. }
  1397. }
  1398. }
  1399. if (!dev_priv->mm.suspended) {
  1400. mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
  1401. if (was_empty)
  1402. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
  1403. }
  1404. return seqno;
  1405. }
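/* The four dwords emitted above ask the ring to store the new seqno into the
 * hardware status page at I915_GEM_HWS_INDEX (later read back by
 * i915_get_gem_seqno()) and then raise MI_USER_INTERRUPT, which is what wakes
 * sleepers on dev_priv->irq_queue in i915_wait_request().
 */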
  1406. /**
  1407. * Command execution barrier
  1408. *
  1409. * Ensures that all commands in the ring are finished
  1410. * before signalling the CPU
  1411. */
  1412. static uint32_t
  1413. i915_retire_commands(struct drm_device *dev)
  1414. {
  1415. drm_i915_private_t *dev_priv = dev->dev_private;
  1416. uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
  1417. uint32_t flush_domains = 0;
  1418. RING_LOCALS;
  1419. /* The sampler always gets flushed on i965 (sigh) */
  1420. if (IS_I965G(dev))
  1421. flush_domains |= I915_GEM_DOMAIN_SAMPLER;
  1422. BEGIN_LP_RING(2);
  1423. OUT_RING(cmd);
  1424. OUT_RING(0); /* noop */
  1425. ADVANCE_LP_RING();
  1426. return flush_domains;
  1427. }
  1428. /**
  1429. * Moves buffers associated only with the given active seqno from the active
  1430. * to inactive list, potentially freeing them.
  1431. */
  1432. static void
  1433. i915_gem_retire_request(struct drm_device *dev,
  1434. struct drm_i915_gem_request *request)
  1435. {
  1436. drm_i915_private_t *dev_priv = dev->dev_private;
  1437. trace_i915_gem_request_retire(dev, request->seqno);
  1438. /* Move any buffers on the active list that are no longer referenced
  1439. * by the ringbuffer to the flushing/inactive lists as appropriate.
  1440. */
  1441. spin_lock(&dev_priv->mm.active_list_lock);
  1442. while (!list_empty(&dev_priv->mm.active_list)) {
  1443. struct drm_gem_object *obj;
  1444. struct drm_i915_gem_object *obj_priv;
  1445. obj_priv = list_first_entry(&dev_priv->mm.active_list,
  1446. struct drm_i915_gem_object,
  1447. list);
  1448. obj = obj_priv->obj;
  1449. /* If the seqno being retired doesn't match the oldest in the
  1450. * list, then the oldest in the list must still be newer than
  1451. * this seqno.
  1452. */
  1453. if (obj_priv->last_rendering_seqno != request->seqno)
  1454. goto out;
  1455. #if WATCH_LRU
  1456. DRM_INFO("%s: retire %d moves to inactive list %p\n",
  1457. __func__, request->seqno, obj);
  1458. #endif
  1459. if (obj->write_domain != 0)
  1460. i915_gem_object_move_to_flushing(obj);
  1461. else {
  1462. /* Take a reference on the object so it won't be
  1463. * freed while the spinlock is held. The list
  1464. * protection for this spinlock is safe when breaking
  1465. * the lock like this since the next thing we do
  1466. * is just get the head of the list again.
  1467. */
  1468. drm_gem_object_reference(obj);
  1469. i915_gem_object_move_to_inactive(obj);
  1470. spin_unlock(&dev_priv->mm.active_list_lock);
  1471. drm_gem_object_unreference(obj);
  1472. spin_lock(&dev_priv->mm.active_list_lock);
  1473. }
  1474. }
  1475. out:
  1476. spin_unlock(&dev_priv->mm.active_list_lock);
  1477. }
  1478. /**
1479. * Returns true if seq1 is later than or equal to seq2 (wraparound-safe).
  1480. */
  1481. bool
  1482. i915_seqno_passed(uint32_t seq1, uint32_t seq2)
  1483. {
  1484. return (int32_t)(seq1 - seq2) >= 0;
  1485. }
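/* Worked example of the wraparound-safe comparison (illustrative values):
 * i915_seqno_passed(0x00000005, 0xfffffffb) computes 0x0000000a, which is
 * positive as an int32_t, so seqno 5 is treated as coming after 0xfffffffb
 * even though it is numerically smaller; the reverse call yields -10 and
 * returns false.
 */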
  1486. uint32_t
  1487. i915_get_gem_seqno(struct drm_device *dev)
  1488. {
  1489. drm_i915_private_t *dev_priv = dev->dev_private;
  1490. return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
  1491. }
  1492. /**
  1493. * This function clears the request list as sequence numbers are passed.
  1494. */
  1495. void
  1496. i915_gem_retire_requests(struct drm_device *dev)
  1497. {
  1498. drm_i915_private_t *dev_priv = dev->dev_private;
  1499. uint32_t seqno;
  1500. if (!dev_priv->hw_status_page)
  1501. return;
  1502. seqno = i915_get_gem_seqno(dev);
  1503. while (!list_empty(&dev_priv->mm.request_list)) {
  1504. struct drm_i915_gem_request *request;
  1505. uint32_t retiring_seqno;
  1506. request = list_first_entry(&dev_priv->mm.request_list,
  1507. struct drm_i915_gem_request,
  1508. list);
  1509. retiring_seqno = request->seqno;
  1510. if (i915_seqno_passed(seqno, retiring_seqno) ||
  1511. atomic_read(&dev_priv->mm.wedged)) {
  1512. i915_gem_retire_request(dev, request);
  1513. list_del(&request->list);
  1514. list_del(&request->client_list);
  1515. kfree(request);
  1516. } else
  1517. break;
  1518. }
  1519. }
  1520. void
  1521. i915_gem_retire_work_handler(struct work_struct *work)
  1522. {
  1523. drm_i915_private_t *dev_priv;
  1524. struct drm_device *dev;
  1525. dev_priv = container_of(work, drm_i915_private_t,
  1526. mm.retire_work.work);
  1527. dev = dev_priv->dev;
  1528. mutex_lock(&dev->struct_mutex);
  1529. i915_gem_retire_requests(dev);
  1530. if (!dev_priv->mm.suspended &&
  1531. !list_empty(&dev_priv->mm.request_list))
  1532. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
  1533. mutex_unlock(&dev->struct_mutex);
  1534. }
  1535. /**
  1536. * Waits for a sequence number to be signaled, and cleans up the
  1537. * request and object lists appropriately for that event.
  1538. */
  1539. static int
  1540. i915_wait_request(struct drm_device *dev, uint32_t seqno)
  1541. {
  1542. drm_i915_private_t *dev_priv = dev->dev_private;
  1543. u32 ier;
  1544. int ret = 0;
  1545. BUG_ON(seqno == 0);
  1546. if (atomic_read(&dev_priv->mm.wedged))
  1547. return -EIO;
  1548. if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
  1549. if (IS_IGDNG(dev))
  1550. ier = I915_READ(DEIER) | I915_READ(GTIER);
  1551. else
  1552. ier = I915_READ(IER);
  1553. if (!ier) {
  1554. DRM_ERROR("something (likely vbetool) disabled "
  1555. "interrupts, re-enabling\n");
  1556. i915_driver_irq_preinstall(dev);
  1557. i915_driver_irq_postinstall(dev);
  1558. }
  1559. trace_i915_gem_request_wait_begin(dev, seqno);
  1560. dev_priv->mm.waiting_gem_seqno = seqno;
  1561. i915_user_irq_get(dev);
  1562. ret = wait_event_interruptible(dev_priv->irq_queue,
  1563. i915_seqno_passed(i915_get_gem_seqno(dev),
  1564. seqno) ||
  1565. atomic_read(&dev_priv->mm.wedged));
  1566. i915_user_irq_put(dev);
  1567. dev_priv->mm.waiting_gem_seqno = 0;
  1568. trace_i915_gem_request_wait_end(dev, seqno);
  1569. }
  1570. if (atomic_read(&dev_priv->mm.wedged))
  1571. ret = -EIO;
  1572. if (ret && ret != -ERESTARTSYS)
  1573. DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
  1574. __func__, ret, seqno, i915_get_gem_seqno(dev));
  1575. /* Directly dispatch request retiring. While we have the work queue
  1576. * to handle this, the waiter on a request often wants an associated
  1577. * buffer to have made it to the inactive list, and we would need
  1578. * a separate wait queue to handle that.
  1579. */
  1580. if (ret == 0)
  1581. i915_gem_retire_requests(dev);
  1582. return ret;
  1583. }
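/* wait_event_interruptible() returns -ERESTARTSYS when a signal arrives, in
 * which case the syscall is expected to be restarted by the caller; that is
 * why it is excluded from the DRM_ERROR report above and why requests are
 * only retired here after a fully successful wait.
 */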
  1584. static void
  1585. i915_gem_flush(struct drm_device *dev,
  1586. uint32_t invalidate_domains,
  1587. uint32_t flush_domains)
  1588. {
  1589. drm_i915_private_t *dev_priv = dev->dev_private;
  1590. uint32_t cmd;
  1591. RING_LOCALS;
  1592. #if WATCH_EXEC
  1593. DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
  1594. invalidate_domains, flush_domains);
  1595. #endif
  1596. trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
  1597. invalidate_domains, flush_domains);
  1598. if (flush_domains & I915_GEM_DOMAIN_CPU)
  1599. drm_agp_chipset_flush(dev);
  1600. if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
  1601. /*
  1602. * read/write caches:
  1603. *
  1604. * I915_GEM_DOMAIN_RENDER is always invalidated, but is
  1605. * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
  1606. * also flushed at 2d versus 3d pipeline switches.
  1607. *
  1608. * read-only caches:
  1609. *
  1610. * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
  1611. * MI_READ_FLUSH is set, and is always flushed on 965.
  1612. *
  1613. * I915_GEM_DOMAIN_COMMAND may not exist?
  1614. *
  1615. * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
  1616. * invalidated when MI_EXE_FLUSH is set.
  1617. *
  1618. * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
  1619. * invalidated with every MI_FLUSH.
  1620. *
  1621. * TLBs:
  1622. *
  1623. * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1624. * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
  1625. * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
  1626. * are flushed at any MI_FLUSH.
  1627. */
  1628. cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
  1629. if ((invalidate_domains|flush_domains) &
  1630. I915_GEM_DOMAIN_RENDER)
  1631. cmd &= ~MI_NO_WRITE_FLUSH;
  1632. if (!IS_I965G(dev)) {
  1633. /*
  1634. * On the 965, the sampler cache always gets flushed
  1635. * and this bit is reserved.
  1636. */
  1637. if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
  1638. cmd |= MI_READ_FLUSH;
  1639. }
  1640. if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
  1641. cmd |= MI_EXE_FLUSH;
  1642. #if WATCH_EXEC
  1643. DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
  1644. #endif
  1645. BEGIN_LP_RING(2);
  1646. OUT_RING(cmd);
  1647. OUT_RING(0); /* noop */
  1648. ADVANCE_LP_RING();
  1649. }
  1650. }
  1651. /**
  1652. * Ensures that all rendering to the object has completed and the object is
  1653. * safe to unbind from the GTT or access from the CPU.
  1654. */
  1655. static int
  1656. i915_gem_object_wait_rendering(struct drm_gem_object *obj)
  1657. {
  1658. struct drm_device *dev = obj->dev;
  1659. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1660. int ret;
  1661. /* This function only exists to support waiting for existing rendering,
  1662. * not for emitting required flushes.
  1663. */
  1664. BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
  1665. /* If there is rendering queued on the buffer being evicted, wait for
  1666. * it.
  1667. */
  1668. if (obj_priv->active) {
  1669. #if WATCH_BUF
  1670. DRM_INFO("%s: object %p wait for seqno %08x\n",
  1671. __func__, obj, obj_priv->last_rendering_seqno);
  1672. #endif
  1673. ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
  1674. if (ret != 0)
  1675. return ret;
  1676. }
  1677. return 0;
  1678. }
  1679. /**
  1680. * Unbinds an object from the GTT aperture.
  1681. */
  1682. int
  1683. i915_gem_object_unbind(struct drm_gem_object *obj)
  1684. {
  1685. struct drm_device *dev = obj->dev;
  1686. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1687. int ret = 0;
  1688. #if WATCH_BUF
  1689. DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
  1690. DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
  1691. #endif
  1692. if (obj_priv->gtt_space == NULL)
  1693. return 0;
  1694. if (obj_priv->pin_count != 0) {
  1695. DRM_ERROR("Attempting to unbind pinned buffer\n");
  1696. return -EINVAL;
  1697. }
  1698. /* blow away mappings if mapped through GTT */
  1699. i915_gem_release_mmap(obj);
  1700. if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
  1701. i915_gem_clear_fence_reg(obj);
  1702. /* Move the object to the CPU domain to ensure that
  1703. * any possible CPU writes while it's not in the GTT
  1704. * are flushed when we go to remap it. This will
  1705. * also ensure that all pending GPU writes are finished
  1706. * before we unbind.
  1707. */
  1708. ret = i915_gem_object_set_to_cpu_domain(obj, 1);
  1709. if (ret) {
  1710. if (ret != -ERESTARTSYS)
  1711. DRM_ERROR("set_domain failed: %d\n", ret);
  1712. return ret;
  1713. }
  1714. BUG_ON(obj_priv->active);
  1715. if (obj_priv->agp_mem != NULL) {
  1716. drm_unbind_agp(obj_priv->agp_mem);
  1717. drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
  1718. obj_priv->agp_mem = NULL;
  1719. }
  1720. i915_gem_object_put_pages(obj);
  1721. BUG_ON(obj_priv->pages_refcount);
  1722. if (obj_priv->gtt_space) {
  1723. atomic_dec(&dev->gtt_count);
  1724. atomic_sub(obj->size, &dev->gtt_memory);
  1725. drm_mm_put_block(obj_priv->gtt_space);
  1726. obj_priv->gtt_space = NULL;
  1727. }
  1728. /* Remove ourselves from the LRU list if present. */
  1729. if (!list_empty(&obj_priv->list))
  1730. list_del_init(&obj_priv->list);
  1731. if (i915_gem_object_is_purgeable(obj_priv))
  1732. i915_gem_object_truncate(obj);
  1733. trace_i915_gem_object_unbind(obj);
  1734. return 0;
  1735. }
  1736. static struct drm_gem_object *
  1737. i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
  1738. {
  1739. drm_i915_private_t *dev_priv = dev->dev_private;
  1740. struct drm_i915_gem_object *obj_priv;
  1741. struct drm_gem_object *best = NULL;
  1742. struct drm_gem_object *first = NULL;
  1743. /* Try to find the smallest clean object */
  1744. list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
  1745. struct drm_gem_object *obj = obj_priv->obj;
  1746. if (obj->size >= min_size) {
  1747. if ((!obj_priv->dirty ||
  1748. i915_gem_object_is_purgeable(obj_priv)) &&
  1749. (!best || obj->size < best->size)) {
  1750. best = obj;
  1751. if (best->size == min_size)
  1752. return best;
  1753. }
  1754. if (!first)
  1755. first = obj;
  1756. }
  1757. }
  1758. return best ? best : first;
  1759. }
  1760. static int
  1761. i915_gem_evict_everything(struct drm_device *dev)
  1762. {
  1763. drm_i915_private_t *dev_priv = dev->dev_private;
  1764. uint32_t seqno;
  1765. int ret;
  1766. bool lists_empty;
  1767. spin_lock(&dev_priv->mm.active_list_lock);
  1768. lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
  1769. list_empty(&dev_priv->mm.flushing_list) &&
  1770. list_empty(&dev_priv->mm.active_list));
  1771. spin_unlock(&dev_priv->mm.active_list_lock);
  1772. if (lists_empty)
  1773. return -ENOSPC;
  1774. /* Flush everything (on to the inactive lists) and evict */
  1775. i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
  1776. seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
  1777. if (seqno == 0)
  1778. return -ENOMEM;
  1779. ret = i915_wait_request(dev, seqno);
  1780. if (ret)
  1781. return ret;
  1782. ret = i915_gem_evict_from_inactive_list(dev);
  1783. if (ret)
  1784. return ret;
  1785. spin_lock(&dev_priv->mm.active_list_lock);
  1786. lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
  1787. list_empty(&dev_priv->mm.flushing_list) &&
  1788. list_empty(&dev_priv->mm.active_list));
  1789. spin_unlock(&dev_priv->mm.active_list_lock);
  1790. BUG_ON(!lists_empty);
  1791. return 0;
  1792. }
  1793. static int
  1794. i915_gem_evict_something(struct drm_device *dev, int min_size)
  1795. {
  1796. drm_i915_private_t *dev_priv = dev->dev_private;
  1797. struct drm_gem_object *obj;
  1798. int ret;
  1799. for (;;) {
  1800. i915_gem_retire_requests(dev);
  1801. /* If there's an inactive buffer available now, grab it
  1802. * and be done.
  1803. */
  1804. obj = i915_gem_find_inactive_object(dev, min_size);
  1805. if (obj) {
  1806. struct drm_i915_gem_object *obj_priv;
  1807. #if WATCH_LRU
  1808. DRM_INFO("%s: evicting %p\n", __func__, obj);
  1809. #endif
  1810. obj_priv = obj->driver_private;
  1811. BUG_ON(obj_priv->pin_count != 0);
  1812. BUG_ON(obj_priv->active);
  1813. /* Wait on the rendering and unbind the buffer. */
  1814. return i915_gem_object_unbind(obj);
  1815. }
  1816. /* If we didn't get anything, but the ring is still processing
  1817. * things, wait for the next to finish and hopefully leave us
  1818. * a buffer to evict.
  1819. */
  1820. if (!list_empty(&dev_priv->mm.request_list)) {
  1821. struct drm_i915_gem_request *request;
  1822. request = list_first_entry(&dev_priv->mm.request_list,
  1823. struct drm_i915_gem_request,
  1824. list);
  1825. ret = i915_wait_request(dev, request->seqno);
  1826. if (ret)
  1827. return ret;
  1828. continue;
  1829. }
  1830. /* If we didn't have anything on the request list but there
  1831. * are buffers awaiting a flush, emit one and try again.
  1832. * When we wait on it, those buffers waiting for that flush
  1833. * will get moved to inactive.
  1834. */
  1835. if (!list_empty(&dev_priv->mm.flushing_list)) {
  1836. struct drm_i915_gem_object *obj_priv;
  1837. /* Find an object that we can immediately reuse */
  1838. list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
  1839. obj = obj_priv->obj;
  1840. if (obj->size >= min_size)
  1841. break;
  1842. obj = NULL;
  1843. }
  1844. if (obj != NULL) {
  1845. uint32_t seqno;
  1846. i915_gem_flush(dev,
  1847. obj->write_domain,
  1848. obj->write_domain);
  1849. seqno = i915_add_request(dev, NULL, obj->write_domain);
  1850. if (seqno == 0)
  1851. return -ENOMEM;
  1852. ret = i915_wait_request(dev, seqno);
  1853. if (ret)
  1854. return ret;
  1855. continue;
  1856. }
  1857. }
  1858. /* If we didn't do any of the above, there's no single buffer
  1859. * large enough to swap out for the new one, so just evict
  1860. * everything and start again. (This should be rare.)
  1861. */
  1862. if (!list_empty (&dev_priv->mm.inactive_list))
  1863. return i915_gem_evict_from_inactive_list(dev);
  1864. else
  1865. return i915_gem_evict_everything(dev);
  1866. }
  1867. }
  1868. int
  1869. i915_gem_object_get_pages(struct drm_gem_object *obj)
  1870. {
  1871. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1872. int page_count, i;
  1873. struct address_space *mapping;
  1874. struct inode *inode;
  1875. struct page *page;
  1876. int ret;
  1877. if (obj_priv->pages_refcount++ != 0)
  1878. return 0;
  1879. /* Get the list of pages out of our struct file. They'll be pinned
  1880. * at this point until we release them.
  1881. */
  1882. page_count = obj->size / PAGE_SIZE;
  1883. BUG_ON(obj_priv->pages != NULL);
  1884. obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
  1885. if (obj_priv->pages == NULL) {
  1886. obj_priv->pages_refcount--;
  1887. return -ENOMEM;
  1888. }
  1889. inode = obj->filp->f_path.dentry->d_inode;
  1890. mapping = inode->i_mapping;
  1891. for (i = 0; i < page_count; i++) {
  1892. page = read_mapping_page(mapping, i, NULL);
  1893. if (IS_ERR(page)) {
  1894. ret = PTR_ERR(page);
  1895. i915_gem_object_put_pages(obj);
  1896. return ret;
  1897. }
  1898. obj_priv->pages[i] = page;
  1899. }
  1900. if (obj_priv->tiling_mode != I915_TILING_NONE)
  1901. i915_gem_object_do_bit_17_swizzle(obj);
  1902. return 0;
  1903. }
  1904. static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
  1905. {
  1906. struct drm_gem_object *obj = reg->obj;
  1907. struct drm_device *dev = obj->dev;
  1908. drm_i915_private_t *dev_priv = dev->dev_private;
  1909. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1910. int regnum = obj_priv->fence_reg;
  1911. uint64_t val;
  1912. val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
  1913. 0xfffff000) << 32;
  1914. val |= obj_priv->gtt_offset & 0xfffff000;
  1915. val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
  1916. if (obj_priv->tiling_mode == I915_TILING_Y)
  1917. val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  1918. val |= I965_FENCE_REG_VALID;
  1919. I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
  1920. }
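/* Layout written above, as constructed by the code: the upper dword holds the
 * fence end address (start + size - 4096, 4KB-aligned), the lower dword the
 * start address, plus the pitch in 128-byte units minus one at
 * I965_FENCE_PITCH_SHIFT, an optional Y-tiling bit and the valid bit.
 */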
  1921. static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
  1922. {
  1923. struct drm_gem_object *obj = reg->obj;
  1924. struct drm_device *dev = obj->dev;
  1925. drm_i915_private_t *dev_priv = dev->dev_private;
  1926. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1927. int regnum = obj_priv->fence_reg;
  1928. int tile_width;
  1929. uint32_t fence_reg, val;
  1930. uint32_t pitch_val;
  1931. if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
  1932. (obj_priv->gtt_offset & (obj->size - 1))) {
  1933. WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
  1934. __func__, obj_priv->gtt_offset, obj->size);
  1935. return;
  1936. }
  1937. if (obj_priv->tiling_mode == I915_TILING_Y &&
  1938. HAS_128_BYTE_Y_TILING(dev))
  1939. tile_width = 128;
  1940. else
  1941. tile_width = 512;
  1942. /* Note: pitch better be a power of two tile widths */
  1943. pitch_val = obj_priv->stride / tile_width;
  1944. pitch_val = ffs(pitch_val) - 1;
  1945. val = obj_priv->gtt_offset;
  1946. if (obj_priv->tiling_mode == I915_TILING_Y)
  1947. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  1948. val |= I915_FENCE_SIZE_BITS(obj->size);
  1949. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  1950. val |= I830_FENCE_REG_VALID;
  1951. if (regnum < 8)
  1952. fence_reg = FENCE_REG_830_0 + (regnum * 4);
  1953. else
  1954. fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
  1955. I915_WRITE(fence_reg, val);
  1956. }
  1957. static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
  1958. {
  1959. struct drm_gem_object *obj = reg->obj;
  1960. struct drm_device *dev = obj->dev;
  1961. drm_i915_private_t *dev_priv = dev->dev_private;
  1962. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1963. int regnum = obj_priv->fence_reg;
  1964. uint32_t val;
  1965. uint32_t pitch_val;
  1966. uint32_t fence_size_bits;
  1967. if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
  1968. (obj_priv->gtt_offset & (obj->size - 1))) {
  1969. WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
  1970. __func__, obj_priv->gtt_offset);
  1971. return;
  1972. }
  1973. pitch_val = obj_priv->stride / 128;
  1974. pitch_val = ffs(pitch_val) - 1;
  1975. WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
  1976. val = obj_priv->gtt_offset;
  1977. if (obj_priv->tiling_mode == I915_TILING_Y)
  1978. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  1979. fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
  1980. WARN_ON(fence_size_bits & ~0x00000f00);
  1981. val |= fence_size_bits;
  1982. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  1983. val |= I830_FENCE_REG_VALID;
  1984. I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
  1985. }
  1986. /**
  1987. * i915_gem_object_get_fence_reg - set up a fence reg for an object
  1988. * @obj: object to map through a fence reg
  1989. *
  1990. * When mapping objects through the GTT, userspace wants to be able to write
  1991. * to them without having to worry about swizzling if the object is tiled.
  1992. *
  1993. * This function walks the fence regs looking for a free one for @obj,
  1994. * stealing one if it can't find any.
  1995. *
  1996. * It then sets up the reg based on the object's properties: address, pitch
  1997. * and tiling format.
  1998. */
  1999. int
  2000. i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
  2001. {
  2002. struct drm_device *dev = obj->dev;
  2003. struct drm_i915_private *dev_priv = dev->dev_private;
  2004. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2005. struct drm_i915_fence_reg *reg = NULL;
  2006. struct drm_i915_gem_object *old_obj_priv = NULL;
  2007. int i, ret, avail;
  2008. /* Just update our place in the LRU if our fence is getting used. */
  2009. if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
  2010. list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
  2011. return 0;
  2012. }
  2013. switch (obj_priv->tiling_mode) {
  2014. case I915_TILING_NONE:
  2015. WARN(1, "allocating a fence for non-tiled object?\n");
  2016. break;
  2017. case I915_TILING_X:
  2018. if (!obj_priv->stride)
  2019. return -EINVAL;
  2020. WARN((obj_priv->stride & (512 - 1)),
  2021. "object 0x%08x is X tiled but has non-512B pitch\n",
  2022. obj_priv->gtt_offset);
  2023. break;
  2024. case I915_TILING_Y:
  2025. if (!obj_priv->stride)
  2026. return -EINVAL;
  2027. WARN((obj_priv->stride & (128 - 1)),
  2028. "object 0x%08x is Y tiled but has non-128B pitch\n",
  2029. obj_priv->gtt_offset);
  2030. break;
  2031. }
  2032. /* First try to find a free reg */
  2033. avail = 0;
  2034. for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
  2035. reg = &dev_priv->fence_regs[i];
  2036. if (!reg->obj)
  2037. break;
  2038. old_obj_priv = reg->obj->driver_private;
  2039. if (!old_obj_priv->pin_count)
  2040. avail++;
  2041. }
  2042. /* None available, try to steal one or wait for a user to finish */
  2043. if (i == dev_priv->num_fence_regs) {
  2044. struct drm_gem_object *old_obj = NULL;
  2045. if (avail == 0)
  2046. return -ENOSPC;
  2047. list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
  2048. fence_list) {
  2049. old_obj = old_obj_priv->obj;
  2050. if (old_obj_priv->pin_count)
  2051. continue;
  2052. /* Take a reference, as otherwise the wait_rendering
  2053. * below may cause the object to get freed out from
  2054. * under us.
  2055. */
  2056. drm_gem_object_reference(old_obj);
  2057. /* i915 uses fences for GPU access to tiled buffers */
  2058. if (IS_I965G(dev) || !old_obj_priv->active)
  2059. break;
  2060. /* This brings the object to the head of the LRU if it
  2061. * had been written to. The only way this should
  2062. * result in us waiting longer than the expected
  2063. * optimal amount of time is if there was a
  2064. * fence-using buffer later that was read-only.
  2065. */
  2066. i915_gem_object_flush_gpu_write_domain(old_obj);
  2067. ret = i915_gem_object_wait_rendering(old_obj);
  2068. if (ret != 0) {
  2069. drm_gem_object_unreference(old_obj);
  2070. return ret;
  2071. }
  2072. break;
  2073. }
  2074. /*
  2075. * Zap this virtual mapping so we can set up a fence again
  2076. * for this object next time we need it.
  2077. */
  2078. i915_gem_release_mmap(old_obj);
  2079. i = old_obj_priv->fence_reg;
  2080. reg = &dev_priv->fence_regs[i];
  2081. old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
  2082. list_del_init(&old_obj_priv->fence_list);
  2083. drm_gem_object_unreference(old_obj);
  2084. }
  2085. obj_priv->fence_reg = i;
  2086. list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
  2087. reg->obj = obj;
  2088. if (IS_I965G(dev))
  2089. i965_write_fence_reg(reg);
  2090. else if (IS_I9XX(dev))
  2091. i915_write_fence_reg(reg);
  2092. else
  2093. i830_write_fence_reg(reg);
  2094. trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
  2095. return 0;
  2096. }
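/* Stealing strategy above, in short: fences are kept in LRU order on
 * dev_priv->mm.fence_list; when none are free we take the oldest unpinned
 * holder, wait for its rendering on pre-965 parts (where the GPU itself uses
 * the fence for tiled access), zap its CPU mappings via
 * i915_gem_release_mmap() so the next touch refaults, and reuse its register.
 */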
  2097. /**
  2098. * i915_gem_clear_fence_reg - clear out fence register info
  2099. * @obj: object to clear
  2100. *
  2101. * Zeroes out the fence register itself and clears out the associated
  2102. * data structures in dev_priv and obj_priv.
  2103. */
  2104. static void
  2105. i915_gem_clear_fence_reg(struct drm_gem_object *obj)
  2106. {
  2107. struct drm_device *dev = obj->dev;
  2108. drm_i915_private_t *dev_priv = dev->dev_private;
  2109. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2110. if (IS_I965G(dev))
  2111. I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
  2112. else {
  2113. uint32_t fence_reg;
  2114. if (obj_priv->fence_reg < 8)
  2115. fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
  2116. else
  2117. fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
  2118. 8) * 4;
  2119. I915_WRITE(fence_reg, 0);
  2120. }
  2121. dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
  2122. obj_priv->fence_reg = I915_FENCE_REG_NONE;
  2123. list_del_init(&obj_priv->fence_list);
  2124. }
  2125. /**
  2126. * i915_gem_object_put_fence_reg - waits on outstanding fenced access
  2127. * to the buffer to finish, and then resets the fence register.
  2128. * @obj: tiled object holding a fence register.
  2129. *
  2130. * Zeroes out the fence register itself and clears out the associated
  2131. * data structures in dev_priv and obj_priv.
  2132. */
  2133. int
  2134. i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
  2135. {
  2136. struct drm_device *dev = obj->dev;
  2137. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2138. if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
  2139. return 0;
  2140. /* On the i915, GPU access to tiled buffers is via a fence,
  2141. * therefore we must wait for any outstanding access to complete
  2142. * before clearing the fence.
  2143. */
  2144. if (!IS_I965G(dev)) {
  2145. int ret;
  2146. i915_gem_object_flush_gpu_write_domain(obj);
  2147. i915_gem_object_flush_gtt_write_domain(obj);
  2148. ret = i915_gem_object_wait_rendering(obj);
  2149. if (ret != 0)
  2150. return ret;
  2151. }
2152. i915_gem_clear_fence_reg(obj);
  2153. return 0;
  2154. }
  2155. /**
  2156. * Finds free space in the GTT aperture and binds the object there.
  2157. */
  2158. static int
  2159. i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
  2160. {
  2161. struct drm_device *dev = obj->dev;
  2162. drm_i915_private_t *dev_priv = dev->dev_private;
  2163. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2164. struct drm_mm_node *free_space;
  2165. bool retry_alloc = false;
  2166. int ret;
  2167. if (dev_priv->mm.suspended)
  2168. return -EBUSY;
  2169. if (obj_priv->madv != I915_MADV_WILLNEED) {
  2170. DRM_ERROR("Attempting to bind a purgeable object\n");
  2171. return -EINVAL;
  2172. }
  2173. if (alignment == 0)
  2174. alignment = i915_gem_get_gtt_alignment(obj);
  2175. if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
  2176. DRM_ERROR("Invalid object alignment requested %u\n", alignment);
  2177. return -EINVAL;
  2178. }
  2179. search_free:
  2180. free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
  2181. obj->size, alignment, 0);
  2182. if (free_space != NULL) {
  2183. obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
  2184. alignment);
  2185. if (obj_priv->gtt_space != NULL) {
  2186. obj_priv->gtt_space->private = obj;
  2187. obj_priv->gtt_offset = obj_priv->gtt_space->start;
  2188. }
  2189. }
  2190. if (obj_priv->gtt_space == NULL) {
  2191. /* If the gtt is empty and we're still having trouble
  2192. * fitting our object in, we're out of memory.
  2193. */
  2194. #if WATCH_LRU
  2195. DRM_INFO("%s: GTT full, evicting something\n", __func__);
  2196. #endif
  2197. ret = i915_gem_evict_something(dev, obj->size);
  2198. if (ret)
  2199. return ret;
  2200. goto search_free;
  2201. }
  2202. #if WATCH_BUF
  2203. DRM_INFO("Binding object of size %zd at 0x%08x\n",
  2204. obj->size, obj_priv->gtt_offset);
  2205. #endif
  2206. if (retry_alloc) {
2207. i915_gem_object_set_page_gfp_mask(obj,
2208. i915_gem_object_get_page_gfp_mask(obj) & ~__GFP_NORETRY);
  2209. }
  2210. ret = i915_gem_object_get_pages(obj);
  2211. if (retry_alloc) {
2212. i915_gem_object_set_page_gfp_mask(obj,
2213. i915_gem_object_get_page_gfp_mask(obj) | __GFP_NORETRY);
  2214. }
  2215. if (ret) {
  2216. drm_mm_put_block(obj_priv->gtt_space);
  2217. obj_priv->gtt_space = NULL;
  2218. if (ret == -ENOMEM) {
  2219. /* first try to clear up some space from the GTT */
  2220. ret = i915_gem_evict_something(dev, obj->size);
  2221. if (ret) {
  2222. /* now try to shrink everyone else */
2223. if (!retry_alloc) {
  2224. retry_alloc = true;
  2225. goto search_free;
  2226. }
  2227. return ret;
  2228. }
  2229. goto search_free;
  2230. }
  2231. return ret;
  2232. }
  2233. /* Create an AGP memory structure pointing at our pages, and bind it
  2234. * into the GTT.
  2235. */
  2236. obj_priv->agp_mem = drm_agp_bind_pages(dev,
  2237. obj_priv->pages,
  2238. obj->size >> PAGE_SHIFT,
  2239. obj_priv->gtt_offset,
  2240. obj_priv->agp_type);
  2241. if (obj_priv->agp_mem == NULL) {
  2242. i915_gem_object_put_pages(obj);
  2243. drm_mm_put_block(obj_priv->gtt_space);
  2244. obj_priv->gtt_space = NULL;
  2245. ret = i915_gem_evict_something(dev, obj->size);
  2246. if (ret)
  2247. return ret;
  2248. goto search_free;
  2249. }
  2250. atomic_inc(&dev->gtt_count);
  2251. atomic_add(obj->size, &dev->gtt_memory);
  2252. /* Assert that the object is not currently in any GPU domain. As it
  2253. * wasn't in the GTT, there shouldn't be any way it could have been in
  2254. * a GPU cache
  2255. */
  2256. BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
  2257. BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
  2258. trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
  2259. return 0;
  2260. }
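/* Fallback ordering in this function: a failed drm_mm search evicts something
 * and retries the search; an -ENOMEM from i915_gem_object_get_pages() first
 * evicts from the GTT and retries, and if that eviction also fails one final
 * pass is made with __GFP_NORETRY cleared (retry_alloc) so the page allocator
 * can try harder before we give up.
 */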
  2261. void
  2262. i915_gem_clflush_object(struct drm_gem_object *obj)
  2263. {
  2264. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2265. /* If we don't have a page list set up, then we're not pinned
  2266. * to GPU, and we can ignore the cache flush because it'll happen
  2267. * again at bind time.
  2268. */
  2269. if (obj_priv->pages == NULL)
  2270. return;
  2271. trace_i915_gem_object_clflush(obj);
  2272. drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
  2273. }
  2274. /** Flushes any GPU write domain for the object if it's dirty. */
  2275. static void
  2276. i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
  2277. {
  2278. struct drm_device *dev = obj->dev;
  2279. uint32_t seqno;
  2280. uint32_t old_write_domain;
  2281. if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
  2282. return;
  2283. /* Queue the GPU write cache flushing we need. */
  2284. old_write_domain = obj->write_domain;
  2285. i915_gem_flush(dev, 0, obj->write_domain);
  2286. seqno = i915_add_request(dev, NULL, obj->write_domain);
  2287. obj->write_domain = 0;
  2288. i915_gem_object_move_to_active(obj, seqno);
  2289. trace_i915_gem_object_change_domain(obj,
  2290. obj->read_domains,
  2291. old_write_domain);
  2292. }
  2293. /** Flushes the GTT write domain for the object if it's dirty. */
  2294. static void
  2295. i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
  2296. {
  2297. uint32_t old_write_domain;
  2298. if (obj->write_domain != I915_GEM_DOMAIN_GTT)
  2299. return;
  2300. /* No actual flushing is required for the GTT write domain. Writes
  2301. * to it immediately go to main memory as far as we know, so there's
  2302. * no chipset flush. It also doesn't land in render cache.
  2303. */
  2304. old_write_domain = obj->write_domain;
  2305. obj->write_domain = 0;
  2306. trace_i915_gem_object_change_domain(obj,
  2307. obj->read_domains,
  2308. old_write_domain);
  2309. }
  2310. /** Flushes the CPU write domain for the object if it's dirty. */
  2311. static void
  2312. i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
  2313. {
  2314. struct drm_device *dev = obj->dev;
  2315. uint32_t old_write_domain;
  2316. if (obj->write_domain != I915_GEM_DOMAIN_CPU)
  2317. return;
  2318. i915_gem_clflush_object(obj);
  2319. drm_agp_chipset_flush(dev);
  2320. old_write_domain = obj->write_domain;
  2321. obj->write_domain = 0;
  2322. trace_i915_gem_object_change_domain(obj,
  2323. obj->read_domains,
  2324. old_write_domain);
  2325. }
  2326. /**
  2327. * Moves a single object to the GTT read, and possibly write domain.
  2328. *
  2329. * This function returns when the move is complete, including waiting on
  2330. * flushes to occur.
  2331. */
  2332. int
  2333. i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  2334. {
  2335. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2336. uint32_t old_write_domain, old_read_domains;
  2337. int ret;
  2338. /* Not valid to be called on unbound objects. */
  2339. if (obj_priv->gtt_space == NULL)
  2340. return -EINVAL;
  2341. i915_gem_object_flush_gpu_write_domain(obj);
  2342. /* Wait on any GPU rendering and flushing to occur. */
  2343. ret = i915_gem_object_wait_rendering(obj);
  2344. if (ret != 0)
  2345. return ret;
  2346. old_write_domain = obj->write_domain;
  2347. old_read_domains = obj->read_domains;
  2348. /* If we're writing through the GTT domain, then CPU and GPU caches
  2349. * will need to be invalidated at next use.
  2350. */
  2351. if (write)
  2352. obj->read_domains &= I915_GEM_DOMAIN_GTT;
  2353. i915_gem_object_flush_cpu_write_domain(obj);
  2354. /* It should now be out of any other write domains, and we can update
  2355. * the domain values for our changes.
  2356. */
  2357. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  2358. obj->read_domains |= I915_GEM_DOMAIN_GTT;
  2359. if (write) {
  2360. obj->write_domain = I915_GEM_DOMAIN_GTT;
  2361. obj_priv->dirty = 1;
  2362. }
  2363. trace_i915_gem_object_change_domain(obj,
  2364. old_read_domains,
  2365. old_write_domain);
  2366. return 0;
  2367. }
  2368. /**
  2369. * Moves a single object to the CPU read, and possibly write domain.
  2370. *
  2371. * This function returns when the move is complete, including waiting on
  2372. * flushes to occur.
  2373. */
  2374. static int
  2375. i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  2376. {
  2377. uint32_t old_write_domain, old_read_domains;
  2378. int ret;
  2379. i915_gem_object_flush_gpu_write_domain(obj);
  2380. /* Wait on any GPU rendering and flushing to occur. */
  2381. ret = i915_gem_object_wait_rendering(obj);
  2382. if (ret != 0)
  2383. return ret;
  2384. i915_gem_object_flush_gtt_write_domain(obj);
  2385. /* If we have a partially-valid cache of the object in the CPU,
  2386. * finish invalidating it and free the per-page flags.
  2387. */
  2388. i915_gem_object_set_to_full_cpu_read_domain(obj);
  2389. old_write_domain = obj->write_domain;
  2390. old_read_domains = obj->read_domains;
  2391. /* Flush the CPU cache if it's still invalid. */
  2392. if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
  2393. i915_gem_clflush_object(obj);
  2394. obj->read_domains |= I915_GEM_DOMAIN_CPU;
  2395. }
  2396. /* It should now be out of any other write domains, and we can update
  2397. * the domain values for our changes.
  2398. */
  2399. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  2400. /* If we're writing through the CPU, then the GPU read domains will
  2401. * need to be invalidated at next use.
  2402. */
  2403. if (write) {
  2404. obj->read_domains &= I915_GEM_DOMAIN_CPU;
  2405. obj->write_domain = I915_GEM_DOMAIN_CPU;
  2406. }
  2407. trace_i915_gem_object_change_domain(obj,
  2408. old_read_domains,
  2409. old_write_domain);
  2410. return 0;
  2411. }
  2412. /*
  2413. * Set the next domain for the specified object. This
2414. * may not actually perform the necessary flushing/invalidating though,
  2415. * as that may want to be batched with other set_domain operations
  2416. *
  2417. * This is (we hope) the only really tricky part of gem. The goal
  2418. * is fairly simple -- track which caches hold bits of the object
  2419. * and make sure they remain coherent. A few concrete examples may
  2420. * help to explain how it works. For shorthand, we use the notation
2421. * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2422. * a pair of read and write domain masks.
  2423. *
  2424. * Case 1: the batch buffer
  2425. *
  2426. * 1. Allocated
  2427. * 2. Written by CPU
  2428. * 3. Mapped to GTT
  2429. * 4. Read by GPU
  2430. * 5. Unmapped from GTT
  2431. * 6. Freed
  2432. *
  2433. * Let's take these a step at a time
  2434. *
  2435. * 1. Allocated
  2436. * Pages allocated from the kernel may still have
  2437. * cache contents, so we set them to (CPU, CPU) always.
  2438. * 2. Written by CPU (using pwrite)
  2439. * The pwrite function calls set_domain (CPU, CPU) and
  2440. * this function does nothing (as nothing changes)
  2441. * 3. Mapped by GTT
  2442. * This function asserts that the object is not
  2443. * currently in any GPU-based read or write domains
  2444. * 4. Read by GPU
  2445. * i915_gem_execbuffer calls set_domain (COMMAND, 0).
  2446. * As write_domain is zero, this function adds in the
  2447. * current read domains (CPU+COMMAND, 0).
  2448. * flush_domains is set to CPU.
  2449. * invalidate_domains is set to COMMAND
  2450. * clflush is run to get data out of the CPU caches
  2451. * then i915_dev_set_domain calls i915_gem_flush to
  2452. * emit an MI_FLUSH and drm_agp_chipset_flush
  2453. * 5. Unmapped from GTT
  2454. * i915_gem_object_unbind calls set_domain (CPU, CPU)
  2455. * flush_domains and invalidate_domains end up both zero
  2456. * so no flushing/invalidating happens
  2457. * 6. Freed
  2458. * yay, done
  2459. *
  2460. * Case 2: The shared render buffer
  2461. *
  2462. * 1. Allocated
  2463. * 2. Mapped to GTT
  2464. * 3. Read/written by GPU
  2465. * 4. set_domain to (CPU,CPU)
  2466. * 5. Read/written by CPU
  2467. * 6. Read/written by GPU
  2468. *
  2469. * 1. Allocated
  2470. * Same as last example, (CPU, CPU)
  2471. * 2. Mapped to GTT
  2472. * Nothing changes (assertions find that it is not in the GPU)
  2473. * 3. Read/written by GPU
  2474. * execbuffer calls set_domain (RENDER, RENDER)
  2475. * flush_domains gets CPU
  2476. * invalidate_domains gets GPU
  2477. * clflush (obj)
  2478. * MI_FLUSH and drm_agp_chipset_flush
  2479. * 4. set_domain (CPU, CPU)
  2480. * flush_domains gets GPU
  2481. * invalidate_domains gets CPU
  2482. * wait_rendering (obj) to make sure all drawing is complete.
  2483. * This will include an MI_FLUSH to get the data from GPU
  2484. * to memory
  2485. * clflush (obj) to invalidate the CPU cache
  2486. * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
  2487. * 5. Read/written by CPU
  2488. * cache lines are loaded and dirtied
  2489. * 6. Read written by GPU
  2490. * Same as last GPU access
  2491. *
  2492. * Case 3: The constant buffer
  2493. *
  2494. * 1. Allocated
  2495. * 2. Written by CPU
  2496. * 3. Read by GPU
  2497. * 4. Updated (written) by CPU again
  2498. * 5. Read by GPU
  2499. *
  2500. * 1. Allocated
  2501. * (CPU, CPU)
  2502. * 2. Written by CPU
  2503. * (CPU, CPU)
  2504. * 3. Read by GPU
  2505. * (CPU+RENDER, 0)
  2506. * flush_domains = CPU
  2507. * invalidate_domains = RENDER
  2508. * clflush (obj)
  2509. * MI_FLUSH
  2510. * drm_agp_chipset_flush
  2511. * 4. Updated (written) by CPU again
  2512. * (CPU, CPU)
  2513. * flush_domains = 0 (no previous write domain)
  2514. * invalidate_domains = 0 (no new read domains)
  2515. * 5. Read by GPU
  2516. * (CPU+RENDER, 0)
  2517. * flush_domains = CPU
  2518. * invalidate_domains = RENDER
  2519. * clflush (obj)
  2520. * MI_FLUSH
  2521. * drm_agp_chipset_flush
  2522. */
  2523. static void
  2524. i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
  2525. {
  2526. struct drm_device *dev = obj->dev;
  2527. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2528. uint32_t invalidate_domains = 0;
  2529. uint32_t flush_domains = 0;
  2530. uint32_t old_read_domains;
  2531. BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
  2532. BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
  2533. intel_mark_busy(dev, obj);
  2534. #if WATCH_BUF
  2535. DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
  2536. __func__, obj,
  2537. obj->read_domains, obj->pending_read_domains,
  2538. obj->write_domain, obj->pending_write_domain);
  2539. #endif
  2540. /*
  2541. * If the object isn't moving to a new write domain,
  2542. * let the object stay in multiple read domains
  2543. */
  2544. if (obj->pending_write_domain == 0)
  2545. obj->pending_read_domains |= obj->read_domains;
  2546. else
  2547. obj_priv->dirty = 1;
  2548. /*
  2549. * Flush the current write domain if
  2550. * the new read domains don't match. Invalidate
  2551. * any read domains which differ from the old
  2552. * write domain
  2553. */
  2554. if (obj->write_domain &&
  2555. obj->write_domain != obj->pending_read_domains) {
  2556. flush_domains |= obj->write_domain;
  2557. invalidate_domains |=
  2558. obj->pending_read_domains & ~obj->write_domain;
  2559. }
  2560. /*
  2561. * Invalidate any read caches which may have
  2562. * stale data. That is, any new read domains.
  2563. */
  2564. invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
  2565. if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
  2566. #if WATCH_BUF
  2567. DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
  2568. __func__, flush_domains, invalidate_domains);
  2569. #endif
  2570. i915_gem_clflush_object(obj);
  2571. }
  2572. old_read_domains = obj->read_domains;
  2573. /* The actual obj->write_domain will be updated with
  2574. * pending_write_domain after we emit the accumulated flush for all
  2575. * of our domain changes in execbuffers (which clears objects'
  2576. * write_domains). So if we have a current write domain that we
  2577. * aren't changing, set pending_write_domain to that.
  2578. */
  2579. if (flush_domains == 0 && obj->pending_write_domain == 0)
  2580. obj->pending_write_domain = obj->write_domain;
  2581. obj->read_domains = obj->pending_read_domains;
  2582. dev->invalidate_domains |= invalidate_domains;
  2583. dev->flush_domains |= flush_domains;
  2584. #if WATCH_BUF
  2585. DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
  2586. __func__,
  2587. obj->read_domains, obj->write_domain,
  2588. dev->invalidate_domains, dev->flush_domains);
  2589. #endif
  2590. trace_i915_gem_object_change_domain(obj,
  2591. old_read_domains,
  2592. obj->write_domain);
  2593. }
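/*
 * Illustrative walk-through of the bookkeeping above (editor's sketch,
 * not original driver code), for a CPU-written buffer about to be read
 * by the render engine, i.e. Case 3 step 3: read_domains = CPU,
 * write_domain = CPU, pending_read_domains = RENDER, no pending write.
 *
 *	pending_write_domain == 0, so pending_read_domains |= CPU
 *	write_domain (CPU) != pending_read_domains, so flush_domains |= CPU
 *	invalidate_domains |= pending_read_domains & ~write_domain = RENDER
 *	invalidate_domains |= pending_read_domains & ~read_domains = RENDER
 *
 * CPU in the combined mask triggers the clflush here; the accumulated
 * dev->invalidate_domains/flush_domains then produce a single
 * MI_FLUSH plus chipset flush in execbuffer, matching the case analysis
 * in the comment above.
 */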
  2594. /**
  2595. * Moves the object from a partially CPU read to a full one.
  2596. *
  2597. * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
  2598. * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  2599. */
  2600. static void
  2601. i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
  2602. {
  2603. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2604. if (!obj_priv->page_cpu_valid)
  2605. return;
  2606. /* If we're partially in the CPU read domain, finish moving it in.
  2607. */
  2608. if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
  2609. int i;
  2610. for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
  2611. if (obj_priv->page_cpu_valid[i])
  2612. continue;
  2613. drm_clflush_pages(obj_priv->pages + i, 1);
  2614. }
  2615. }
  2616. /* Free the page_cpu_valid mappings which are now stale, whether
  2617. * or not we've got I915_GEM_DOMAIN_CPU.
  2618. */
  2619. kfree(obj_priv->page_cpu_valid);
  2620. obj_priv->page_cpu_valid = NULL;
  2621. }
  2622. /**
  2623. * Set the CPU read domain on a range of the object.
  2624. *
  2625. * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
  2626. * not entirely valid. The page_cpu_valid member of the object flags which
  2627. * pages have been flushed, and will be respected by
  2628. * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
  2629. * of the whole object.
  2630. *
  2631. * This function returns when the move is complete, including waiting on
  2632. * flushes to occur.
  2633. */
  2634. static int
  2635. i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
  2636. uint64_t offset, uint64_t size)
  2637. {
  2638. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2639. uint32_t old_read_domains;
  2640. int i, ret;
  2641. if (offset == 0 && size == obj->size)
  2642. return i915_gem_object_set_to_cpu_domain(obj, 0);
  2643. i915_gem_object_flush_gpu_write_domain(obj);
  2644. /* Wait on any GPU rendering and flushing to occur. */
  2645. ret = i915_gem_object_wait_rendering(obj);
  2646. if (ret != 0)
  2647. return ret;
  2648. i915_gem_object_flush_gtt_write_domain(obj);
  2649. /* If we're already fully in the CPU read domain, we're done. */
  2650. if (obj_priv->page_cpu_valid == NULL &&
  2651. (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
  2652. return 0;
  2653. /* Otherwise, create/clear the per-page CPU read domain flag if we're
  2654. * newly adding I915_GEM_DOMAIN_CPU
  2655. */
  2656. if (obj_priv->page_cpu_valid == NULL) {
  2657. obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
  2658. GFP_KERNEL);
  2659. if (obj_priv->page_cpu_valid == NULL)
  2660. return -ENOMEM;
  2661. } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
  2662. memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
  2663. /* Flush the cache on any pages that are still invalid from the CPU's
  2664. * perspective.
  2665. */
  2666. for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
  2667. i++) {
  2668. if (obj_priv->page_cpu_valid[i])
  2669. continue;
  2670. drm_clflush_pages(obj_priv->pages + i, 1);
  2671. obj_priv->page_cpu_valid[i] = 1;
  2672. }
  2673. /* It should now be out of any other write domains, and we can update
  2674. * the domain values for our changes.
  2675. */
  2676. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  2677. old_read_domains = obj->read_domains;
  2678. obj->read_domains |= I915_GEM_DOMAIN_CPU;
  2679. trace_i915_gem_object_change_domain(obj,
  2680. old_read_domains,
  2681. obj->write_domain);
  2682. return 0;
  2683. }
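/*
 * Illustrative example of the range-to-page arithmetic above (editor's
 * sketch with hypothetical values, assuming 4 KiB pages):
 *
 *	offset = 4096, size = 8192
 *	first page = offset / PAGE_SIZE              = 1
 *	last page  = (offset + size - 1) / PAGE_SIZE = 2
 *
 * Pages 1 and 2 get clflushed and marked in page_cpu_valid[], while
 * page 0 remains invalid; a later whole-object move to the CPU domain
 * therefore has to finish the job via
 * i915_gem_object_set_to_full_cpu_read_domain().
 */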
  2684. /**
  2685. * Pin an object to the GTT and evaluate the relocations landing in it.
  2686. */
  2687. static int
  2688. i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  2689. struct drm_file *file_priv,
  2690. struct drm_i915_gem_exec_object *entry,
  2691. struct drm_i915_gem_relocation_entry *relocs)
  2692. {
  2693. struct drm_device *dev = obj->dev;
  2694. drm_i915_private_t *dev_priv = dev->dev_private;
  2695. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2696. int i, ret;
  2697. void __iomem *reloc_page;
  2698. /* Choose the GTT offset for our buffer and put it there. */
  2699. ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
  2700. if (ret)
  2701. return ret;
  2702. entry->offset = obj_priv->gtt_offset;
  2703. /* Apply the relocations, using the GTT aperture to avoid cache
  2704. * flushing requirements.
  2705. */
  2706. for (i = 0; i < entry->relocation_count; i++) {
struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
  2708. struct drm_gem_object *target_obj;
  2709. struct drm_i915_gem_object *target_obj_priv;
  2710. uint32_t reloc_val, reloc_offset;
  2711. uint32_t __iomem *reloc_entry;
  2712. target_obj = drm_gem_object_lookup(obj->dev, file_priv,
  2713. reloc->target_handle);
  2714. if (target_obj == NULL) {
  2715. i915_gem_object_unpin(obj);
  2716. return -EBADF;
  2717. }
  2718. target_obj_priv = target_obj->driver_private;
  2719. #if WATCH_RELOC
  2720. DRM_INFO("%s: obj %p offset %08x target %d "
  2721. "read %08x write %08x gtt %08x "
  2722. "presumed %08x delta %08x\n",
  2723. __func__,
  2724. obj,
  2725. (int) reloc->offset,
  2726. (int) reloc->target_handle,
  2727. (int) reloc->read_domains,
  2728. (int) reloc->write_domain,
  2729. (int) target_obj_priv->gtt_offset,
  2730. (int) reloc->presumed_offset,
  2731. reloc->delta);
  2732. #endif
  2733. /* The target buffer should have appeared before us in the
  2734. * exec_object list, so it should have a GTT space bound by now.
  2735. */
  2736. if (target_obj_priv->gtt_space == NULL) {
  2737. DRM_ERROR("No GTT space found for object %d\n",
  2738. reloc->target_handle);
  2739. drm_gem_object_unreference(target_obj);
  2740. i915_gem_object_unpin(obj);
  2741. return -EINVAL;
  2742. }
  2743. /* Validate that the target is in a valid r/w GPU domain */
  2744. if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
  2745. reloc->read_domains & I915_GEM_DOMAIN_CPU) {
  2746. DRM_ERROR("reloc with read/write CPU domains: "
  2747. "obj %p target %d offset %d "
  2748. "read %08x write %08x",
  2749. obj, reloc->target_handle,
  2750. (int) reloc->offset,
  2751. reloc->read_domains,
  2752. reloc->write_domain);
  2753. drm_gem_object_unreference(target_obj);
  2754. i915_gem_object_unpin(obj);
  2755. return -EINVAL;
  2756. }
  2757. if (reloc->write_domain && target_obj->pending_write_domain &&
  2758. reloc->write_domain != target_obj->pending_write_domain) {
  2759. DRM_ERROR("Write domain conflict: "
  2760. "obj %p target %d offset %d "
  2761. "new %08x old %08x\n",
  2762. obj, reloc->target_handle,
  2763. (int) reloc->offset,
  2764. reloc->write_domain,
  2765. target_obj->pending_write_domain);
  2766. drm_gem_object_unreference(target_obj);
  2767. i915_gem_object_unpin(obj);
  2768. return -EINVAL;
  2769. }
  2770. target_obj->pending_read_domains |= reloc->read_domains;
  2771. target_obj->pending_write_domain |= reloc->write_domain;
  2772. /* If the relocation already has the right value in it, no
  2773. * more work needs to be done.
  2774. */
  2775. if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
  2776. drm_gem_object_unreference(target_obj);
  2777. continue;
  2778. }
  2779. /* Check that the relocation address is valid... */
  2780. if (reloc->offset > obj->size - 4) {
  2781. DRM_ERROR("Relocation beyond object bounds: "
  2782. "obj %p target %d offset %d size %d.\n",
  2783. obj, reloc->target_handle,
  2784. (int) reloc->offset, (int) obj->size);
  2785. drm_gem_object_unreference(target_obj);
  2786. i915_gem_object_unpin(obj);
  2787. return -EINVAL;
  2788. }
  2789. if (reloc->offset & 3) {
  2790. DRM_ERROR("Relocation not 4-byte aligned: "
  2791. "obj %p target %d offset %d.\n",
  2792. obj, reloc->target_handle,
  2793. (int) reloc->offset);
  2794. drm_gem_object_unreference(target_obj);
  2795. i915_gem_object_unpin(obj);
  2796. return -EINVAL;
  2797. }
  2798. /* and points to somewhere within the target object. */
  2799. if (reloc->delta >= target_obj->size) {
  2800. DRM_ERROR("Relocation beyond target object bounds: "
  2801. "obj %p target %d delta %d size %d.\n",
  2802. obj, reloc->target_handle,
  2803. (int) reloc->delta, (int) target_obj->size);
  2804. drm_gem_object_unreference(target_obj);
  2805. i915_gem_object_unpin(obj);
  2806. return -EINVAL;
  2807. }
  2808. ret = i915_gem_object_set_to_gtt_domain(obj, 1);
  2809. if (ret != 0) {
  2810. drm_gem_object_unreference(target_obj);
  2811. i915_gem_object_unpin(obj);
  2812. return -EINVAL;
  2813. }
  2814. /* Map the page containing the relocation we're going to
  2815. * perform.
  2816. */
  2817. reloc_offset = obj_priv->gtt_offset + reloc->offset;
  2818. reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
  2819. (reloc_offset &
  2820. ~(PAGE_SIZE - 1)));
  2821. reloc_entry = (uint32_t __iomem *)(reloc_page +
  2822. (reloc_offset & (PAGE_SIZE - 1)));
  2823. reloc_val = target_obj_priv->gtt_offset + reloc->delta;
  2824. #if WATCH_BUF
  2825. DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
  2826. obj, (unsigned int) reloc->offset,
  2827. readl(reloc_entry), reloc_val);
  2828. #endif
  2829. writel(reloc_val, reloc_entry);
  2830. io_mapping_unmap_atomic(reloc_page);
  2831. /* The updated presumed offset for this entry will be
  2832. * copied back out to the user.
  2833. */
  2834. reloc->presumed_offset = target_obj_priv->gtt_offset;
  2835. drm_gem_object_unreference(target_obj);
  2836. }
  2837. #if WATCH_BUF
  2838. if (0)
  2839. i915_gem_dump_object(obj, 128, __func__, ~0);
  2840. #endif
  2841. return 0;
  2842. }
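/*
 * Illustrative relocation fix-up from the loop above (editor's sketch,
 * hypothetical offsets):
 *
 *	obj_priv->gtt_offset        = 0x00100000
 *	reloc->offset               = 0x84  (inside obj, 4-byte aligned)
 *	target_obj_priv->gtt_offset = 0x00200000
 *	reloc->delta                = 0x40  (inside target_obj)
 *
 *	reloc_val = 0x00200000 + 0x40 = 0x00200040, written through the
 *	atomic GTT mapping at 0x00100000 + 0x84, and 0x00200000 is copied
 *	back to userspace as the new presumed_offset.
 *
 * If presumed_offset had already been 0x00200000, the entry would have
 * been skipped without touching the GTT at all.
 */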
  2843. /** Dispatch a batchbuffer to the ring
  2844. */
  2845. static int
  2846. i915_dispatch_gem_execbuffer(struct drm_device *dev,
  2847. struct drm_i915_gem_execbuffer *exec,
  2848. struct drm_clip_rect *cliprects,
  2849. uint64_t exec_offset)
  2850. {
  2851. drm_i915_private_t *dev_priv = dev->dev_private;
  2852. int nbox = exec->num_cliprects;
  2853. int i = 0, count;
  2854. uint32_t exec_start, exec_len;
  2855. RING_LOCALS;
  2856. exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
  2857. exec_len = (uint32_t) exec->batch_len;
  2858. trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
  2859. count = nbox ? nbox : 1;
  2860. for (i = 0; i < count; i++) {
  2861. if (i < nbox) {
  2862. int ret = i915_emit_box(dev, cliprects, i,
  2863. exec->DR1, exec->DR4);
  2864. if (ret)
  2865. return ret;
  2866. }
  2867. if (IS_I830(dev) || IS_845G(dev)) {
  2868. BEGIN_LP_RING(4);
  2869. OUT_RING(MI_BATCH_BUFFER);
  2870. OUT_RING(exec_start | MI_BATCH_NON_SECURE);
  2871. OUT_RING(exec_start + exec_len - 4);
  2872. OUT_RING(0);
  2873. ADVANCE_LP_RING();
  2874. } else {
  2875. BEGIN_LP_RING(2);
  2876. if (IS_I965G(dev)) {
  2877. OUT_RING(MI_BATCH_BUFFER_START |
  2878. (2 << 6) |
  2879. MI_BATCH_NON_SECURE_I965);
  2880. OUT_RING(exec_start);
  2881. } else {
  2882. OUT_RING(MI_BATCH_BUFFER_START |
  2883. (2 << 6));
  2884. OUT_RING(exec_start | MI_BATCH_NON_SECURE);
  2885. }
  2886. ADVANCE_LP_RING();
  2887. }
  2888. }
  2889. /* XXX breadcrumb */
  2890. return 0;
  2891. }
  2892. /* Throttle our rendering by waiting until the ring has completed our requests
  2893. * emitted over 20 msec ago.
  2894. *
  2895. * Note that if we were to use the current jiffies each time around the loop,
  2896. * we wouldn't escape the function with any frames outstanding if the time to
  2897. * render a frame was over 20ms.
  2898. *
  2899. * This should get us reasonable parallelism between CPU and GPU but also
  2900. * relatively low latency when blocking on a particular request to finish.
  2901. */
  2902. static int
  2903. i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
  2904. {
  2905. struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
  2906. int ret = 0;
  2907. unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
  2908. mutex_lock(&dev->struct_mutex);
  2909. while (!list_empty(&i915_file_priv->mm.request_list)) {
  2910. struct drm_i915_gem_request *request;
  2911. request = list_first_entry(&i915_file_priv->mm.request_list,
  2912. struct drm_i915_gem_request,
  2913. client_list);
  2914. if (time_after_eq(request->emitted_jiffies, recent_enough))
  2915. break;
  2916. ret = i915_wait_request(dev, request->seqno);
  2917. if (ret != 0)
  2918. break;
  2919. }
  2920. mutex_unlock(&dev->struct_mutex);
  2921. return ret;
  2922. }
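/*
 * Illustrative timing for the throttle above (editor's sketch, assuming
 * HZ = 1000 so one jiffy is 1 ms):
 *
 *	jiffies       = 10050
 *	recent_enough = 10050 - msecs_to_jiffies(20) = 10030
 *
 * A request emitted at jiffy 10010 is older than recent_enough, so the
 * loop waits for its seqno; one emitted at 10040 is newer, the loop
 * breaks, and the client keeps at most ~20 ms of work queued ahead of
 * the GPU.
 */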
  2923. static int
  2924. i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
  2925. uint32_t buffer_count,
  2926. struct drm_i915_gem_relocation_entry **relocs)
  2927. {
  2928. uint32_t reloc_count = 0, reloc_index = 0, i;
  2929. int ret;
  2930. *relocs = NULL;
  2931. for (i = 0; i < buffer_count; i++) {
  2932. if (reloc_count + exec_list[i].relocation_count < reloc_count)
  2933. return -EINVAL;
  2934. reloc_count += exec_list[i].relocation_count;
  2935. }
  2936. *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
  2937. if (*relocs == NULL)
  2938. return -ENOMEM;
  2939. for (i = 0; i < buffer_count; i++) {
  2940. struct drm_i915_gem_relocation_entry __user *user_relocs;
  2941. user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
  2942. ret = copy_from_user(&(*relocs)[reloc_index],
  2943. user_relocs,
  2944. exec_list[i].relocation_count *
  2945. sizeof(**relocs));
  2946. if (ret != 0) {
  2947. drm_free_large(*relocs);
  2948. *relocs = NULL;
  2949. return -EFAULT;
  2950. }
  2951. reloc_index += exec_list[i].relocation_count;
  2952. }
  2953. return 0;
  2954. }
  2955. static int
  2956. i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
  2957. uint32_t buffer_count,
  2958. struct drm_i915_gem_relocation_entry *relocs)
  2959. {
  2960. uint32_t reloc_count = 0, i;
  2961. int ret = 0;
  2962. for (i = 0; i < buffer_count; i++) {
  2963. struct drm_i915_gem_relocation_entry __user *user_relocs;
  2964. int unwritten;
  2965. user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
  2966. unwritten = copy_to_user(user_relocs,
  2967. &relocs[reloc_count],
  2968. exec_list[i].relocation_count *
  2969. sizeof(*relocs));
  2970. if (unwritten) {
  2971. ret = -EFAULT;
  2972. goto err;
  2973. }
  2974. reloc_count += exec_list[i].relocation_count;
  2975. }
  2976. err:
  2977. drm_free_large(relocs);
  2978. return ret;
  2979. }
  2980. static int
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
  2982. uint64_t exec_offset)
  2983. {
  2984. uint32_t exec_start, exec_len;
  2985. exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
  2986. exec_len = (uint32_t) exec->batch_len;
  2987. if ((exec_start | exec_len) & 0x7)
  2988. return -EINVAL;
  2989. if (!exec_start)
  2990. return -EINVAL;
  2991. return 0;
  2992. }
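/*
 * Illustrative values for the batch sanity check above (editor's
 * sketch):
 *
 *	exec_start = 0x10000, exec_len = 0x100  -> both 8-byte aligned, OK
 *	exec_start = 0x10004                    -> (start | len) & 0x7 != 0, -EINVAL
 *	exec_start = 0                          -> rejected outright, -EINVAL
 */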
  2993. int
  2994. i915_gem_execbuffer(struct drm_device *dev, void *data,
  2995. struct drm_file *file_priv)
  2996. {
  2997. drm_i915_private_t *dev_priv = dev->dev_private;
  2998. struct drm_i915_gem_execbuffer *args = data;
  2999. struct drm_i915_gem_exec_object *exec_list = NULL;
  3000. struct drm_gem_object **object_list = NULL;
  3001. struct drm_gem_object *batch_obj;
  3002. struct drm_i915_gem_object *obj_priv;
  3003. struct drm_clip_rect *cliprects = NULL;
  3004. struct drm_i915_gem_relocation_entry *relocs;
  3005. int ret, ret2, i, pinned = 0;
  3006. uint64_t exec_offset;
  3007. uint32_t seqno, flush_domains, reloc_index;
  3008. int pin_tries;
  3009. #if WATCH_EXEC
  3010. DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
  3011. (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  3012. #endif
  3013. if (args->buffer_count < 1) {
  3014. DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
  3015. return -EINVAL;
  3016. }
  3017. /* Copy in the exec list from userland */
  3018. exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
  3019. object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
  3020. if (exec_list == NULL || object_list == NULL) {
  3021. DRM_ERROR("Failed to allocate exec or object list "
  3022. "for %d buffers\n",
  3023. args->buffer_count);
  3024. ret = -ENOMEM;
  3025. goto pre_mutex_err;
  3026. }
  3027. ret = copy_from_user(exec_list,
  3028. (struct drm_i915_relocation_entry __user *)
  3029. (uintptr_t) args->buffers_ptr,
  3030. sizeof(*exec_list) * args->buffer_count);
  3031. if (ret != 0) {
  3032. DRM_ERROR("copy %d exec entries failed %d\n",
  3033. args->buffer_count, ret);
  3034. goto pre_mutex_err;
  3035. }
  3036. if (args->num_cliprects != 0) {
  3037. cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
  3038. GFP_KERNEL);
if (cliprects == NULL) {
    ret = -ENOMEM;
    goto pre_mutex_err;
}
  3041. ret = copy_from_user(cliprects,
  3042. (struct drm_clip_rect __user *)
  3043. (uintptr_t) args->cliprects_ptr,
  3044. sizeof(*cliprects) * args->num_cliprects);
  3045. if (ret != 0) {
  3046. DRM_ERROR("copy %d cliprects failed: %d\n",
  3047. args->num_cliprects, ret);
  3048. goto pre_mutex_err;
  3049. }
  3050. }
  3051. ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
  3052. &relocs);
  3053. if (ret != 0)
  3054. goto pre_mutex_err;
  3055. mutex_lock(&dev->struct_mutex);
  3056. i915_verify_inactive(dev, __FILE__, __LINE__);
  3057. if (atomic_read(&dev_priv->mm.wedged)) {
  3058. DRM_ERROR("Execbuf while wedged\n");
  3059. mutex_unlock(&dev->struct_mutex);
  3060. ret = -EIO;
  3061. goto pre_mutex_err;
  3062. }
  3063. if (dev_priv->mm.suspended) {
  3064. DRM_ERROR("Execbuf while VT-switched.\n");
  3065. mutex_unlock(&dev->struct_mutex);
  3066. ret = -EBUSY;
  3067. goto pre_mutex_err;
  3068. }
  3069. /* Look up object handles */
  3070. for (i = 0; i < args->buffer_count; i++) {
  3071. object_list[i] = drm_gem_object_lookup(dev, file_priv,
  3072. exec_list[i].handle);
  3073. if (object_list[i] == NULL) {
  3074. DRM_ERROR("Invalid object handle %d at index %d\n",
  3075. exec_list[i].handle, i);
  3076. ret = -EBADF;
  3077. goto err;
  3078. }
  3079. obj_priv = object_list[i]->driver_private;
  3080. if (obj_priv->in_execbuffer) {
  3081. DRM_ERROR("Object %p appears more than once in object list\n",
  3082. object_list[i]);
  3083. ret = -EBADF;
  3084. goto err;
  3085. }
  3086. obj_priv->in_execbuffer = true;
  3087. }
  3088. /* Pin and relocate */
  3089. for (pin_tries = 0; ; pin_tries++) {
  3090. ret = 0;
  3091. reloc_index = 0;
  3092. for (i = 0; i < args->buffer_count; i++) {
  3093. object_list[i]->pending_read_domains = 0;
  3094. object_list[i]->pending_write_domain = 0;
  3095. ret = i915_gem_object_pin_and_relocate(object_list[i],
  3096. file_priv,
  3097. &exec_list[i],
  3098. &relocs[reloc_index]);
  3099. if (ret)
  3100. break;
  3101. pinned = i + 1;
  3102. reloc_index += exec_list[i].relocation_count;
  3103. }
  3104. /* success */
  3105. if (ret == 0)
  3106. break;
  3107. /* error other than GTT full, or we've already tried again */
  3108. if (ret != -ENOSPC || pin_tries >= 1) {
  3109. if (ret != -ERESTARTSYS) {
  3110. unsigned long long total_size = 0;
  3111. for (i = 0; i < args->buffer_count; i++)
  3112. total_size += object_list[i]->size;
  3113. DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
  3114. pinned+1, args->buffer_count,
  3115. total_size, ret);
  3116. DRM_ERROR("%d objects [%d pinned], "
  3117. "%d object bytes [%d pinned], "
  3118. "%d/%d gtt bytes\n",
  3119. atomic_read(&dev->object_count),
  3120. atomic_read(&dev->pin_count),
  3121. atomic_read(&dev->object_memory),
  3122. atomic_read(&dev->pin_memory),
  3123. atomic_read(&dev->gtt_memory),
  3124. dev->gtt_total);
  3125. }
  3126. goto err;
  3127. }
  3128. /* unpin all of our buffers */
  3129. for (i = 0; i < pinned; i++)
  3130. i915_gem_object_unpin(object_list[i]);
  3131. pinned = 0;
  3132. /* evict everyone we can from the aperture */
  3133. ret = i915_gem_evict_everything(dev);
  3134. if (ret && ret != -ENOSPC)
  3135. goto err;
  3136. }
  3137. /* Set the pending read domains for the batch buffer to COMMAND */
  3138. batch_obj = object_list[args->buffer_count-1];
  3139. if (batch_obj->pending_write_domain) {
  3140. DRM_ERROR("Attempting to use self-modifying batch buffer\n");
  3141. ret = -EINVAL;
  3142. goto err;
  3143. }
  3144. batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
  3145. /* Sanity check the batch buffer, prior to moving objects */
  3146. exec_offset = exec_list[args->buffer_count - 1].offset;
ret = i915_gem_check_execbuffer(args, exec_offset);
  3148. if (ret != 0) {
  3149. DRM_ERROR("execbuf with invalid offset/length\n");
  3150. goto err;
  3151. }
  3152. i915_verify_inactive(dev, __FILE__, __LINE__);
  3153. /* Zero the global flush/invalidate flags. These
  3154. * will be modified as new domains are computed
  3155. * for each object
  3156. */
  3157. dev->invalidate_domains = 0;
  3158. dev->flush_domains = 0;
  3159. for (i = 0; i < args->buffer_count; i++) {
  3160. struct drm_gem_object *obj = object_list[i];
  3161. /* Compute new gpu domains and update invalidate/flush */
  3162. i915_gem_object_set_to_gpu_domain(obj);
  3163. }
  3164. i915_verify_inactive(dev, __FILE__, __LINE__);
  3165. if (dev->invalidate_domains | dev->flush_domains) {
  3166. #if WATCH_EXEC
  3167. DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
  3168. __func__,
  3169. dev->invalidate_domains,
  3170. dev->flush_domains);
  3171. #endif
  3172. i915_gem_flush(dev,
  3173. dev->invalidate_domains,
  3174. dev->flush_domains);
  3175. if (dev->flush_domains)
  3176. (void)i915_add_request(dev, file_priv,
  3177. dev->flush_domains);
  3178. }
  3179. for (i = 0; i < args->buffer_count; i++) {
  3180. struct drm_gem_object *obj = object_list[i];
  3181. uint32_t old_write_domain = obj->write_domain;
  3182. obj->write_domain = obj->pending_write_domain;
  3183. trace_i915_gem_object_change_domain(obj,
  3184. obj->read_domains,
  3185. old_write_domain);
  3186. }
  3187. i915_verify_inactive(dev, __FILE__, __LINE__);
  3188. #if WATCH_COHERENCY
  3189. for (i = 0; i < args->buffer_count; i++) {
  3190. i915_gem_object_check_coherency(object_list[i],
  3191. exec_list[i].handle);
  3192. }
  3193. #endif
  3194. #if WATCH_EXEC
  3195. i915_gem_dump_object(batch_obj,
  3196. args->batch_len,
  3197. __func__,
  3198. ~0);
  3199. #endif
  3200. /* Exec the batchbuffer */
  3201. ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
  3202. if (ret) {
  3203. DRM_ERROR("dispatch failed %d\n", ret);
  3204. goto err;
  3205. }
  3206. /*
  3207. * Ensure that the commands in the batch buffer are
  3208. * finished before the interrupt fires
  3209. */
  3210. flush_domains = i915_retire_commands(dev);
  3211. i915_verify_inactive(dev, __FILE__, __LINE__);
  3212. /*
  3213. * Get a seqno representing the execution of the current buffer,
  3214. * which we can wait on. We would like to mitigate these interrupts,
  3215. * likely by only creating seqnos occasionally (so that we have
  3216. * *some* interrupts representing completion of buffers that we can
  3217. * wait on when trying to clear up gtt space).
  3218. */
  3219. seqno = i915_add_request(dev, file_priv, flush_domains);
  3220. BUG_ON(seqno == 0);
  3221. for (i = 0; i < args->buffer_count; i++) {
  3222. struct drm_gem_object *obj = object_list[i];
  3223. i915_gem_object_move_to_active(obj, seqno);
  3224. #if WATCH_LRU
  3225. DRM_INFO("%s: move to exec list %p\n", __func__, obj);
  3226. #endif
  3227. }
  3228. #if WATCH_LRU
  3229. i915_dump_lru(dev, __func__);
  3230. #endif
  3231. i915_verify_inactive(dev, __FILE__, __LINE__);
  3232. err:
  3233. for (i = 0; i < pinned; i++)
  3234. i915_gem_object_unpin(object_list[i]);
  3235. for (i = 0; i < args->buffer_count; i++) {
  3236. if (object_list[i]) {
  3237. obj_priv = object_list[i]->driver_private;
  3238. obj_priv->in_execbuffer = false;
  3239. }
  3240. drm_gem_object_unreference(object_list[i]);
  3241. }
  3242. mutex_unlock(&dev->struct_mutex);
  3243. if (!ret) {
  3244. /* Copy the new buffer offsets back to the user's exec list. */
  3245. ret = copy_to_user((struct drm_i915_relocation_entry __user *)
  3246. (uintptr_t) args->buffers_ptr,
  3247. exec_list,
  3248. sizeof(*exec_list) * args->buffer_count);
  3249. if (ret) {
  3250. ret = -EFAULT;
  3251. DRM_ERROR("failed to copy %d exec entries "
  3252. "back to user (%d)\n",
  3253. args->buffer_count, ret);
  3254. }
  3255. }
  3256. /* Copy the updated relocations out regardless of current error
  3257. * state. Failure to update the relocs would mean that the next
  3258. * time userland calls execbuf, it would do so with presumed offset
  3259. * state that didn't match the actual object state.
  3260. */
  3261. ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
  3262. relocs);
  3263. if (ret2 != 0) {
  3264. DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
  3265. if (ret == 0)
  3266. ret = ret2;
  3267. }
  3268. pre_mutex_err:
  3269. drm_free_large(object_list);
  3270. drm_free_large(exec_list);
  3271. kfree(cliprects);
  3272. return ret;
  3273. }
  3274. int
  3275. i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
  3276. {
  3277. struct drm_device *dev = obj->dev;
  3278. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  3279. int ret;
  3280. i915_verify_inactive(dev, __FILE__, __LINE__);
  3281. if (obj_priv->gtt_space == NULL) {
  3282. ret = i915_gem_object_bind_to_gtt(obj, alignment);
  3283. if (ret)
  3284. return ret;
  3285. }
  3286. /*
  3287. * Pre-965 chips need a fence register set up in order to
  3288. * properly handle tiled surfaces.
  3289. */
  3290. if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
  3291. ret = i915_gem_object_get_fence_reg(obj);
  3292. if (ret != 0) {
  3293. if (ret != -EBUSY && ret != -ERESTARTSYS)
  3294. DRM_ERROR("Failure to install fence: %d\n",
  3295. ret);
  3296. return ret;
  3297. }
  3298. }
  3299. obj_priv->pin_count++;
  3300. /* If the object is not active and not pending a flush,
  3301. * remove it from the inactive list
  3302. */
  3303. if (obj_priv->pin_count == 1) {
  3304. atomic_inc(&dev->pin_count);
  3305. atomic_add(obj->size, &dev->pin_memory);
  3306. if (!obj_priv->active &&
  3307. (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
  3308. !list_empty(&obj_priv->list))
  3309. list_del_init(&obj_priv->list);
  3310. }
  3311. i915_verify_inactive(dev, __FILE__, __LINE__);
  3312. return 0;
  3313. }
  3314. void
  3315. i915_gem_object_unpin(struct drm_gem_object *obj)
  3316. {
  3317. struct drm_device *dev = obj->dev;
  3318. drm_i915_private_t *dev_priv = dev->dev_private;
  3319. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  3320. i915_verify_inactive(dev, __FILE__, __LINE__);
  3321. obj_priv->pin_count--;
  3322. BUG_ON(obj_priv->pin_count < 0);
  3323. BUG_ON(obj_priv->gtt_space == NULL);
  3324. /* If the object is no longer pinned, and is
  3325. * neither active nor being flushed, then stick it on
  3326. * the inactive list
  3327. */
  3328. if (obj_priv->pin_count == 0) {
  3329. if (!obj_priv->active &&
  3330. (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
  3331. list_move_tail(&obj_priv->list,
  3332. &dev_priv->mm.inactive_list);
  3333. atomic_dec(&dev->pin_count);
  3334. atomic_sub(obj->size, &dev->pin_memory);
  3335. }
  3336. i915_verify_inactive(dev, __FILE__, __LINE__);
  3337. }
  3338. int
  3339. i915_gem_pin_ioctl(struct drm_device *dev, void *data,
  3340. struct drm_file *file_priv)
  3341. {
  3342. struct drm_i915_gem_pin *args = data;
  3343. struct drm_gem_object *obj;
  3344. struct drm_i915_gem_object *obj_priv;
  3345. int ret;
  3346. mutex_lock(&dev->struct_mutex);
  3347. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3348. if (obj == NULL) {
  3349. DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
  3350. args->handle);
  3351. mutex_unlock(&dev->struct_mutex);
  3352. return -EBADF;
  3353. }
  3354. obj_priv = obj->driver_private;
  3355. if (obj_priv->madv != I915_MADV_WILLNEED) {
  3356. DRM_ERROR("Attempting to pin a purgeable buffer\n");
  3357. drm_gem_object_unreference(obj);
  3358. mutex_unlock(&dev->struct_mutex);
  3359. return -EINVAL;
  3360. }
  3361. if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
  3362. DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
  3363. args->handle);
  3364. drm_gem_object_unreference(obj);
  3365. mutex_unlock(&dev->struct_mutex);
  3366. return -EINVAL;
  3367. }
  3368. obj_priv->user_pin_count++;
  3369. obj_priv->pin_filp = file_priv;
  3370. if (obj_priv->user_pin_count == 1) {
  3371. ret = i915_gem_object_pin(obj, args->alignment);
  3372. if (ret != 0) {
  3373. drm_gem_object_unreference(obj);
  3374. mutex_unlock(&dev->struct_mutex);
  3375. return ret;
  3376. }
  3377. }
  3378. /* XXX - flush the CPU caches for pinned objects
  3379. * as the X server doesn't manage domains yet
  3380. */
  3381. i915_gem_object_flush_cpu_write_domain(obj);
  3382. args->offset = obj_priv->gtt_offset;
  3383. drm_gem_object_unreference(obj);
  3384. mutex_unlock(&dev->struct_mutex);
  3385. return 0;
  3386. }
  3387. int
  3388. i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
  3389. struct drm_file *file_priv)
  3390. {
  3391. struct drm_i915_gem_pin *args = data;
  3392. struct drm_gem_object *obj;
  3393. struct drm_i915_gem_object *obj_priv;
  3394. mutex_lock(&dev->struct_mutex);
  3395. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3396. if (obj == NULL) {
  3397. DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
  3398. args->handle);
  3399. mutex_unlock(&dev->struct_mutex);
  3400. return -EBADF;
  3401. }
  3402. obj_priv = obj->driver_private;
  3403. if (obj_priv->pin_filp != file_priv) {
DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
  3405. args->handle);
  3406. drm_gem_object_unreference(obj);
  3407. mutex_unlock(&dev->struct_mutex);
  3408. return -EINVAL;
  3409. }
  3410. obj_priv->user_pin_count--;
  3411. if (obj_priv->user_pin_count == 0) {
  3412. obj_priv->pin_filp = NULL;
  3413. i915_gem_object_unpin(obj);
  3414. }
  3415. drm_gem_object_unreference(obj);
  3416. mutex_unlock(&dev->struct_mutex);
  3417. return 0;
  3418. }
  3419. int
  3420. i915_gem_busy_ioctl(struct drm_device *dev, void *data,
  3421. struct drm_file *file_priv)
  3422. {
  3423. struct drm_i915_gem_busy *args = data;
  3424. struct drm_gem_object *obj;
  3425. struct drm_i915_gem_object *obj_priv;
  3426. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3427. if (obj == NULL) {
  3428. DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
  3429. args->handle);
  3430. return -EBADF;
  3431. }
  3432. mutex_lock(&dev->struct_mutex);
  3433. /* Update the active list for the hardware's current position.
  3434. * Otherwise this only updates on a delayed timer or when irqs are
  3435. * actually unmasked, and our working set ends up being larger than
  3436. * required.
  3437. */
  3438. i915_gem_retire_requests(dev);
  3439. obj_priv = obj->driver_private;
  3440. /* Don't count being on the flushing list against the object being
  3441. * done. Otherwise, a buffer left on the flushing list but not getting
  3442. * flushed (because nobody's flushing that domain) won't ever return
  3443. * unbusy and get reused by libdrm's bo cache. The other expected
  3444. * consumer of this interface, OpenGL's occlusion queries, also specs
  3445. * that the objects get unbusy "eventually" without any interference.
  3446. */
  3447. args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
  3448. drm_gem_object_unreference(obj);
  3449. mutex_unlock(&dev->struct_mutex);
  3450. return 0;
  3451. }
  3452. int
  3453. i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
  3454. struct drm_file *file_priv)
  3455. {
  3456. return i915_gem_ring_throttle(dev, file_priv);
  3457. }
  3458. int
  3459. i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
  3460. struct drm_file *file_priv)
  3461. {
  3462. struct drm_i915_gem_madvise *args = data;
  3463. struct drm_gem_object *obj;
  3464. struct drm_i915_gem_object *obj_priv;
  3465. switch (args->madv) {
  3466. case I915_MADV_DONTNEED:
  3467. case I915_MADV_WILLNEED:
  3468. break;
  3469. default:
  3470. return -EINVAL;
  3471. }
  3472. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3473. if (obj == NULL) {
  3474. DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
  3475. args->handle);
  3476. return -EBADF;
  3477. }
  3478. mutex_lock(&dev->struct_mutex);
  3479. obj_priv = obj->driver_private;
  3480. if (obj_priv->pin_count) {
  3481. drm_gem_object_unreference(obj);
  3482. mutex_unlock(&dev->struct_mutex);
  3483. DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
  3484. return -EINVAL;
  3485. }
  3486. if (obj_priv->madv != __I915_MADV_PURGED)
  3487. obj_priv->madv = args->madv;
  3488. /* if the object is no longer bound, discard its backing storage */
  3489. if (i915_gem_object_is_purgeable(obj_priv) &&
  3490. obj_priv->gtt_space == NULL)
  3491. i915_gem_object_truncate(obj);
  3492. args->retained = obj_priv->madv != __I915_MADV_PURGED;
  3493. drm_gem_object_unreference(obj);
  3494. mutex_unlock(&dev->struct_mutex);
  3495. return 0;
  3496. }
  3497. int i915_gem_init_object(struct drm_gem_object *obj)
  3498. {
  3499. struct drm_i915_gem_object *obj_priv;
  3500. obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
  3501. if (obj_priv == NULL)
  3502. return -ENOMEM;
  3503. /*
  3504. * We've just allocated pages from the kernel,
  3505. * so they've just been written by the CPU with
  3506. * zeros. They'll need to be clflushed before we
  3507. * use them with the GPU.
  3508. */
  3509. obj->write_domain = I915_GEM_DOMAIN_CPU;
  3510. obj->read_domains = I915_GEM_DOMAIN_CPU;
  3511. obj_priv->agp_type = AGP_USER_MEMORY;
  3512. obj->driver_private = obj_priv;
  3513. obj_priv->obj = obj;
  3514. obj_priv->fence_reg = I915_FENCE_REG_NONE;
  3515. INIT_LIST_HEAD(&obj_priv->list);
  3516. INIT_LIST_HEAD(&obj_priv->fence_list);
  3517. obj_priv->madv = I915_MADV_WILLNEED;
  3518. trace_i915_gem_object_create(obj);
  3519. return 0;
  3520. }
  3521. void i915_gem_free_object(struct drm_gem_object *obj)
  3522. {
  3523. struct drm_device *dev = obj->dev;
  3524. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  3525. trace_i915_gem_object_destroy(obj);
  3526. while (obj_priv->pin_count > 0)
  3527. i915_gem_object_unpin(obj);
  3528. if (obj_priv->phys_obj)
  3529. i915_gem_detach_phys_object(dev, obj);
  3530. i915_gem_object_unbind(obj);
  3531. if (obj_priv->mmap_offset)
  3532. i915_gem_free_mmap_offset(obj);
  3533. kfree(obj_priv->page_cpu_valid);
  3534. kfree(obj_priv->bit_17);
  3535. kfree(obj->driver_private);
  3536. }
  3537. /** Unbinds all inactive objects. */
  3538. static int
  3539. i915_gem_evict_from_inactive_list(struct drm_device *dev)
  3540. {
  3541. drm_i915_private_t *dev_priv = dev->dev_private;
  3542. while (!list_empty(&dev_priv->mm.inactive_list)) {
  3543. struct drm_gem_object *obj;
  3544. int ret;
  3545. obj = list_first_entry(&dev_priv->mm.inactive_list,
  3546. struct drm_i915_gem_object,
  3547. list)->obj;
  3548. ret = i915_gem_object_unbind(obj);
  3549. if (ret != 0) {
  3550. DRM_ERROR("Error unbinding object: %d\n", ret);
  3551. return ret;
  3552. }
  3553. }
  3554. return 0;
  3555. }
  3556. int
  3557. i915_gem_idle(struct drm_device *dev)
  3558. {
  3559. drm_i915_private_t *dev_priv = dev->dev_private;
  3560. uint32_t seqno, cur_seqno, last_seqno;
  3561. int stuck, ret;
  3562. mutex_lock(&dev->struct_mutex);
  3563. if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
  3564. mutex_unlock(&dev->struct_mutex);
  3565. return 0;
  3566. }
  3567. /* Hack! Don't let anybody do execbuf while we don't control the chip.
  3568. * We need to replace this with a semaphore, or something.
  3569. */
  3570. dev_priv->mm.suspended = 1;
  3571. del_timer(&dev_priv->hangcheck_timer);
  3572. /* Cancel the retire work handler, wait for it to finish if running
  3573. */
  3574. mutex_unlock(&dev->struct_mutex);
  3575. cancel_delayed_work_sync(&dev_priv->mm.retire_work);
  3576. mutex_lock(&dev->struct_mutex);
  3577. i915_kernel_lost_context(dev);
  3578. /* Flush the GPU along with all non-CPU write domains
  3579. */
  3580. i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
  3581. seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
  3582. if (seqno == 0) {
  3583. mutex_unlock(&dev->struct_mutex);
  3584. return -ENOMEM;
  3585. }
  3586. dev_priv->mm.waiting_gem_seqno = seqno;
  3587. last_seqno = 0;
  3588. stuck = 0;
  3589. for (;;) {
  3590. cur_seqno = i915_get_gem_seqno(dev);
  3591. if (i915_seqno_passed(cur_seqno, seqno))
  3592. break;
  3593. if (last_seqno == cur_seqno) {
  3594. if (stuck++ > 100) {
  3595. DRM_ERROR("hardware wedged\n");
  3596. atomic_set(&dev_priv->mm.wedged, 1);
  3597. DRM_WAKEUP(&dev_priv->irq_queue);
  3598. break;
  3599. }
  3600. }
  3601. msleep(10);
  3602. last_seqno = cur_seqno;
  3603. }
  3604. dev_priv->mm.waiting_gem_seqno = 0;
  3605. i915_gem_retire_requests(dev);
  3606. spin_lock(&dev_priv->mm.active_list_lock);
  3607. if (!atomic_read(&dev_priv->mm.wedged)) {
  3608. /* Active and flushing should now be empty as we've
  3609. * waited for a sequence higher than any pending execbuffer
  3610. */
  3611. WARN_ON(!list_empty(&dev_priv->mm.active_list));
  3612. WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
  3613. /* Request should now be empty as we've also waited
  3614. * for the last request in the list
  3615. */
  3616. WARN_ON(!list_empty(&dev_priv->mm.request_list));
  3617. }
  3618. /* Empty the active and flushing lists to inactive. If there's
  3619. * anything left at this point, it means that we're wedged and
  3620. * nothing good's going to happen by leaving them there. So strip
  3621. * the GPU domains and just stuff them onto inactive.
  3622. */
  3623. while (!list_empty(&dev_priv->mm.active_list)) {
  3624. struct drm_gem_object *obj;
  3625. uint32_t old_write_domain;
  3626. obj = list_first_entry(&dev_priv->mm.active_list,
  3627. struct drm_i915_gem_object,
  3628. list)->obj;
  3629. old_write_domain = obj->write_domain;
  3630. obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
  3631. i915_gem_object_move_to_inactive(obj);
  3632. trace_i915_gem_object_change_domain(obj,
  3633. obj->read_domains,
  3634. old_write_domain);
  3635. }
  3636. spin_unlock(&dev_priv->mm.active_list_lock);
  3637. while (!list_empty(&dev_priv->mm.flushing_list)) {
  3638. struct drm_gem_object *obj;
  3639. uint32_t old_write_domain;
  3640. obj = list_first_entry(&dev_priv->mm.flushing_list,
  3641. struct drm_i915_gem_object,
  3642. list)->obj;
  3643. old_write_domain = obj->write_domain;
  3644. obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
  3645. i915_gem_object_move_to_inactive(obj);
  3646. trace_i915_gem_object_change_domain(obj,
  3647. obj->read_domains,
  3648. old_write_domain);
  3649. }
  3650. /* Move all inactive buffers out of the GTT. */
  3651. ret = i915_gem_evict_from_inactive_list(dev);
  3652. WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
  3653. if (ret) {
  3654. mutex_unlock(&dev->struct_mutex);
  3655. return ret;
  3656. }
  3657. i915_gem_cleanup_ringbuffer(dev);
  3658. mutex_unlock(&dev->struct_mutex);
  3659. return 0;
  3660. }
  3661. static int
  3662. i915_gem_init_hws(struct drm_device *dev)
  3663. {
  3664. drm_i915_private_t *dev_priv = dev->dev_private;
  3665. struct drm_gem_object *obj;
  3666. struct drm_i915_gem_object *obj_priv;
  3667. int ret;
  3668. /* If we need a physical address for the status page, it's already
  3669. * initialized at driver load time.
  3670. */
  3671. if (!I915_NEED_GFX_HWS(dev))
  3672. return 0;
  3673. obj = drm_gem_object_alloc(dev, 4096);
  3674. if (obj == NULL) {
  3675. DRM_ERROR("Failed to allocate status page\n");
  3676. return -ENOMEM;
  3677. }
  3678. obj_priv = obj->driver_private;
  3679. obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
  3680. ret = i915_gem_object_pin(obj, 4096);
  3681. if (ret != 0) {
  3682. drm_gem_object_unreference(obj);
  3683. return ret;
  3684. }
  3685. dev_priv->status_gfx_addr = obj_priv->gtt_offset;
  3686. dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
  3687. if (dev_priv->hw_status_page == NULL) {
  3688. DRM_ERROR("Failed to map status page.\n");
  3689. memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
  3690. i915_gem_object_unpin(obj);
  3691. drm_gem_object_unreference(obj);
  3692. return -EINVAL;
  3693. }
  3694. dev_priv->hws_obj = obj;
  3695. memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
  3696. I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
  3697. I915_READ(HWS_PGA); /* posting read */
  3698. DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
  3699. return 0;
  3700. }
  3701. static void
  3702. i915_gem_cleanup_hws(struct drm_device *dev)
  3703. {
  3704. drm_i915_private_t *dev_priv = dev->dev_private;
  3705. struct drm_gem_object *obj;
  3706. struct drm_i915_gem_object *obj_priv;
  3707. if (dev_priv->hws_obj == NULL)
  3708. return;
  3709. obj = dev_priv->hws_obj;
  3710. obj_priv = obj->driver_private;
  3711. kunmap(obj_priv->pages[0]);
  3712. i915_gem_object_unpin(obj);
  3713. drm_gem_object_unreference(obj);
  3714. dev_priv->hws_obj = NULL;
  3715. memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
  3716. dev_priv->hw_status_page = NULL;
  3717. /* Write high address into HWS_PGA when disabling. */
  3718. I915_WRITE(HWS_PGA, 0x1ffff000);
  3719. }
  3720. int
  3721. i915_gem_init_ringbuffer(struct drm_device *dev)
  3722. {
  3723. drm_i915_private_t *dev_priv = dev->dev_private;
  3724. struct drm_gem_object *obj;
  3725. struct drm_i915_gem_object *obj_priv;
  3726. drm_i915_ring_buffer_t *ring = &dev_priv->ring;
  3727. int ret;
  3728. u32 head;
  3729. ret = i915_gem_init_hws(dev);
  3730. if (ret != 0)
  3731. return ret;
  3732. obj = drm_gem_object_alloc(dev, 128 * 1024);
  3733. if (obj == NULL) {
  3734. DRM_ERROR("Failed to allocate ringbuffer\n");
  3735. i915_gem_cleanup_hws(dev);
  3736. return -ENOMEM;
  3737. }
  3738. obj_priv = obj->driver_private;
  3739. ret = i915_gem_object_pin(obj, 4096);
  3740. if (ret != 0) {
  3741. drm_gem_object_unreference(obj);
  3742. i915_gem_cleanup_hws(dev);
  3743. return ret;
  3744. }
  3745. /* Set up the kernel mapping for the ring. */
  3746. ring->Size = obj->size;
  3747. ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
  3748. ring->map.size = obj->size;
  3749. ring->map.type = 0;
  3750. ring->map.flags = 0;
  3751. ring->map.mtrr = 0;
  3752. drm_core_ioremap_wc(&ring->map, dev);
  3753. if (ring->map.handle == NULL) {
  3754. DRM_ERROR("Failed to map ringbuffer.\n");
  3755. memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
  3756. i915_gem_object_unpin(obj);
  3757. drm_gem_object_unreference(obj);
  3758. i915_gem_cleanup_hws(dev);
  3759. return -EINVAL;
  3760. }
  3761. ring->ring_obj = obj;
  3762. ring->virtual_start = ring->map.handle;
  3763. /* Stop the ring if it's running. */
  3764. I915_WRITE(PRB0_CTL, 0);
  3765. I915_WRITE(PRB0_TAIL, 0);
  3766. I915_WRITE(PRB0_HEAD, 0);
  3767. /* Initialize the ring. */
  3768. I915_WRITE(PRB0_START, obj_priv->gtt_offset);
  3769. head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
  3770. /* G45 ring initialization fails to reset head to zero */
  3771. if (head != 0) {
  3772. DRM_ERROR("Ring head not reset to zero "
  3773. "ctl %08x head %08x tail %08x start %08x\n",
  3774. I915_READ(PRB0_CTL),
  3775. I915_READ(PRB0_HEAD),
  3776. I915_READ(PRB0_TAIL),
  3777. I915_READ(PRB0_START));
  3778. I915_WRITE(PRB0_HEAD, 0);
  3779. DRM_ERROR("Ring head forced to zero "
  3780. "ctl %08x head %08x tail %08x start %08x\n",
  3781. I915_READ(PRB0_CTL),
  3782. I915_READ(PRB0_HEAD),
  3783. I915_READ(PRB0_TAIL),
  3784. I915_READ(PRB0_START));
  3785. }
  3786. I915_WRITE(PRB0_CTL,
  3787. ((obj->size - 4096) & RING_NR_PAGES) |
  3788. RING_NO_REPORT |
  3789. RING_VALID);
  3790. head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
  3791. /* If the head is still not zero, the ring is dead */
  3792. if (head != 0) {
  3793. DRM_ERROR("Ring initialization failed "
  3794. "ctl %08x head %08x tail %08x start %08x\n",
  3795. I915_READ(PRB0_CTL),
  3796. I915_READ(PRB0_HEAD),
  3797. I915_READ(PRB0_TAIL),
  3798. I915_READ(PRB0_START));
  3799. return -EIO;
  3800. }
  3801. /* Update our cache of the ring state */
  3802. if (!drm_core_check_feature(dev, DRIVER_MODESET))
  3803. i915_kernel_lost_context(dev);
  3804. else {
  3805. ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
  3806. ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
  3807. ring->space = ring->head - (ring->tail + 8);
  3808. if (ring->space < 0)
  3809. ring->space += ring->Size;
  3810. }
  3811. return 0;
  3812. }
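/*
 * Illustrative ring-space calculation from the tail of the function
 * above (editor's sketch, hypothetical register values for the 128 KB
 * ring allocated here):
 *
 *	ring->Size = 0x20000, head = 0x100, tail = 0x1f00
 *	space = 0x100 - (0x1f00 + 8) = -0x1e08  -> negative, so
 *	space += 0x20000                        -> 0x1e1f8 bytes free
 */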
  3813. void
  3814. i915_gem_cleanup_ringbuffer(struct drm_device *dev)
  3815. {
  3816. drm_i915_private_t *dev_priv = dev->dev_private;
  3817. if (dev_priv->ring.ring_obj == NULL)
  3818. return;
  3819. drm_core_ioremapfree(&dev_priv->ring.map, dev);
  3820. i915_gem_object_unpin(dev_priv->ring.ring_obj);
  3821. drm_gem_object_unreference(dev_priv->ring.ring_obj);
  3822. dev_priv->ring.ring_obj = NULL;
  3823. memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
  3824. i915_gem_cleanup_hws(dev);
  3825. }
  3826. int
  3827. i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
  3828. struct drm_file *file_priv)
  3829. {
  3830. drm_i915_private_t *dev_priv = dev->dev_private;
  3831. int ret;
  3832. if (drm_core_check_feature(dev, DRIVER_MODESET))
  3833. return 0;
  3834. if (atomic_read(&dev_priv->mm.wedged)) {
  3835. DRM_ERROR("Reenabling wedged hardware, good luck\n");
  3836. atomic_set(&dev_priv->mm.wedged, 0);
  3837. }
  3838. mutex_lock(&dev->struct_mutex);
  3839. dev_priv->mm.suspended = 0;
  3840. ret = i915_gem_init_ringbuffer(dev);
  3841. if (ret != 0) {
  3842. mutex_unlock(&dev->struct_mutex);
  3843. return ret;
  3844. }
  3845. spin_lock(&dev_priv->mm.active_list_lock);
  3846. BUG_ON(!list_empty(&dev_priv->mm.active_list));
  3847. spin_unlock(&dev_priv->mm.active_list_lock);
  3848. BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
  3849. BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
  3850. BUG_ON(!list_empty(&dev_priv->mm.request_list));
  3851. mutex_unlock(&dev->struct_mutex);
  3852. drm_irq_install(dev);
  3853. return 0;
  3854. }
  3855. int
  3856. i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
  3857. struct drm_file *file_priv)
  3858. {
  3859. int ret;
  3860. if (drm_core_check_feature(dev, DRIVER_MODESET))
  3861. return 0;
  3862. ret = i915_gem_idle(dev);
  3863. drm_irq_uninstall(dev);
  3864. return ret;
  3865. }
  3866. void
  3867. i915_gem_lastclose(struct drm_device *dev)
  3868. {
  3869. int ret;
  3870. if (drm_core_check_feature(dev, DRIVER_MODESET))
  3871. return;
  3872. ret = i915_gem_idle(dev);
  3873. if (ret)
  3874. DRM_ERROR("failed to idle hardware: %d\n", ret);
  3875. }
  3876. void
  3877. i915_gem_load(struct drm_device *dev)
  3878. {
  3879. int i;
  3880. drm_i915_private_t *dev_priv = dev->dev_private;
  3881. spin_lock_init(&dev_priv->mm.active_list_lock);
  3882. INIT_LIST_HEAD(&dev_priv->mm.active_list);
  3883. INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
  3884. INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
  3885. INIT_LIST_HEAD(&dev_priv->mm.request_list);
  3886. INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  3887. INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
  3888. i915_gem_retire_work_handler);
  3889. dev_priv->mm.next_gem_seqno = 1;
  3890. spin_lock(&shrink_list_lock);
  3891. list_add(&dev_priv->mm.shrink_list, &shrink_list);
  3892. spin_unlock(&shrink_list_lock);
  3893. /* Old X drivers will take 0-2 for front, back, depth buffers */
  3894. dev_priv->fence_reg_start = 3;
  3895. if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
  3896. dev_priv->num_fence_regs = 16;
  3897. else
  3898. dev_priv->num_fence_regs = 8;
  3899. /* Initialize fence registers to zero */
  3900. if (IS_I965G(dev)) {
  3901. for (i = 0; i < 16; i++)
  3902. I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
  3903. } else {
  3904. for (i = 0; i < 8; i++)
  3905. I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
  3906. if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
  3907. for (i = 0; i < 8; i++)
  3908. I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
  3909. }
  3910. i915_gem_detect_bit_6_swizzle(dev);
  3911. }
  3912. /*
  3913. * Create a physically contiguous memory object for this object
  3914. * e.g. for cursor + overlay regs
  3915. */
  3916. int i915_gem_init_phys_object(struct drm_device *dev,
  3917. int id, int size)
  3918. {
  3919. drm_i915_private_t *dev_priv = dev->dev_private;
  3920. struct drm_i915_gem_phys_object *phys_obj;
  3921. int ret;
  3922. if (dev_priv->mm.phys_objs[id - 1] || !size)
  3923. return 0;
  3924. phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
  3925. if (!phys_obj)
  3926. return -ENOMEM;
  3927. phys_obj->id = id;
  3928. phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
  3929. if (!phys_obj->handle) {
  3930. ret = -ENOMEM;
  3931. goto kfree_obj;
  3932. }
  3933. #ifdef CONFIG_X86
  3934. set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  3935. #endif
  3936. dev_priv->mm.phys_objs[id - 1] = phys_obj;
  3937. return 0;
  3938. kfree_obj:
  3939. kfree(phys_obj);
  3940. return ret;
  3941. }
  3942. void i915_gem_free_phys_object(struct drm_device *dev, int id)
  3943. {
  3944. drm_i915_private_t *dev_priv = dev->dev_private;
  3945. struct drm_i915_gem_phys_object *phys_obj;
  3946. if (!dev_priv->mm.phys_objs[id - 1])
  3947. return;
  3948. phys_obj = dev_priv->mm.phys_objs[id - 1];
  3949. if (phys_obj->cur_obj) {
  3950. i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
  3951. }
  3952. #ifdef CONFIG_X86
  3953. set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  3954. #endif
  3955. drm_pci_free(dev, phys_obj->handle);
  3956. kfree(phys_obj);
  3957. dev_priv->mm.phys_objs[id - 1] = NULL;
  3958. }
  3959. void i915_gem_free_all_phys_object(struct drm_device *dev)
  3960. {
  3961. int i;
  3962. for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
  3963. i915_gem_free_phys_object(dev, i);
  3964. }
  3965. void i915_gem_detach_phys_object(struct drm_device *dev,
  3966. struct drm_gem_object *obj)
  3967. {
  3968. struct drm_i915_gem_object *obj_priv;
  3969. int i;
  3970. int ret;
  3971. int page_count;
  3972. obj_priv = obj->driver_private;
  3973. if (!obj_priv->phys_obj)
  3974. return;
  3975. ret = i915_gem_object_get_pages(obj);
  3976. if (ret)
  3977. goto out;
  3978. page_count = obj->size / PAGE_SIZE;
  3979. for (i = 0; i < page_count; i++) {
  3980. char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
  3981. char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  3982. memcpy(dst, src, PAGE_SIZE);
  3983. kunmap_atomic(dst, KM_USER0);
  3984. }
  3985. drm_clflush_pages(obj_priv->pages, page_count);
  3986. drm_agp_chipset_flush(dev);
  3987. i915_gem_object_put_pages(obj);
  3988. out:
  3989. obj_priv->phys_obj->cur_obj = NULL;
  3990. obj_priv->phys_obj = NULL;
  3991. }
  3992. int
  3993. i915_gem_attach_phys_object(struct drm_device *dev,
  3994. struct drm_gem_object *obj, int id)
  3995. {
  3996. drm_i915_private_t *dev_priv = dev->dev_private;
  3997. struct drm_i915_gem_object *obj_priv;
  3998. int ret = 0;
  3999. int page_count;
  4000. int i;
  4001. if (id > I915_MAX_PHYS_OBJECT)
  4002. return -EINVAL;
  4003. obj_priv = obj->driver_private;
  4004. if (obj_priv->phys_obj) {
  4005. if (obj_priv->phys_obj->id == id)
  4006. return 0;
  4007. i915_gem_detach_phys_object(dev, obj);
  4008. }
  4009. /* create a new object */
  4010. if (!dev_priv->mm.phys_objs[id - 1]) {
  4011. ret = i915_gem_init_phys_object(dev, id,
  4012. obj->size);
  4013. if (ret) {
  4014. DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
  4015. goto out;
  4016. }
  4017. }
  4018. /* bind to the object */
  4019. obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
  4020. obj_priv->phys_obj->cur_obj = obj;
  4021. ret = i915_gem_object_get_pages(obj);
  4022. if (ret) {
  4023. DRM_ERROR("failed to get page list\n");
  4024. goto out;
  4025. }
  4026. page_count = obj->size / PAGE_SIZE;
  4027. for (i = 0; i < page_count; i++) {
  4028. char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
  4029. char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  4030. memcpy(dst, src, PAGE_SIZE);
  4031. kunmap_atomic(src, KM_USER0);
  4032. }
  4033. i915_gem_object_put_pages(obj);
  4034. return 0;
  4035. out:
  4036. return ret;
  4037. }
  4038. static int
  4039. i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
  4040. struct drm_i915_gem_pwrite *args,
  4041. struct drm_file *file_priv)
  4042. {
  4043. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  4044. void *obj_addr;
  4045. int ret;
  4046. char __user *user_data;
  4047. user_data = (char __user *) (uintptr_t) args->data_ptr;
  4048. obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
  4049. DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
  4050. ret = copy_from_user(obj_addr, user_data, args->size);
  4051. if (ret)
  4052. return -EFAULT;
  4053. drm_agp_chipset_flush(dev);
  4054. return 0;
  4055. }
  4056. void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
  4057. {
  4058. struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
  4059. /* Clean up our request list when the client is going away, so that
  4060. * later retire_requests won't dereference our soon-to-be-gone
  4061. * file_priv.
  4062. */
  4063. mutex_lock(&dev->struct_mutex);
  4064. while (!list_empty(&i915_file_priv->mm.request_list))
  4065. list_del_init(i915_file_priv->mm.request_list.next);
  4066. mutex_unlock(&dev->struct_mutex);
  4067. }
  4068. static int
  4069. i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
  4070. {
  4071. drm_i915_private_t *dev_priv, *next_dev;
  4072. struct drm_i915_gem_object *obj_priv, *next_obj;
  4073. int cnt = 0;
  4074. int would_deadlock = 1;
  4075. /* "fast-path" to count number of available objects */
  4076. if (nr_to_scan == 0) {
  4077. spin_lock(&shrink_list_lock);
  4078. list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
  4079. struct drm_device *dev = dev_priv->dev;
  4080. if (mutex_trylock(&dev->struct_mutex)) {
  4081. list_for_each_entry(obj_priv,
  4082. &dev_priv->mm.inactive_list,
  4083. list)
  4084. cnt++;
  4085. mutex_unlock(&dev->struct_mutex);
  4086. }
  4087. }
  4088. spin_unlock(&shrink_list_lock);
  4089. return (cnt / 100) * sysctl_vfs_cache_pressure;
  4090. }
  4091. spin_lock(&shrink_list_lock);
  4092. /* first scan for clean buffers */
  4093. list_for_each_entry_safe(dev_priv, next_dev,
  4094. &shrink_list, mm.shrink_list) {
  4095. struct drm_device *dev = dev_priv->dev;
if (!mutex_trylock(&dev->struct_mutex))
  4097. continue;
  4098. spin_unlock(&shrink_list_lock);
  4099. i915_gem_retire_requests(dev);
  4100. list_for_each_entry_safe(obj_priv, next_obj,
  4101. &dev_priv->mm.inactive_list,
  4102. list) {
  4103. if (i915_gem_object_is_purgeable(obj_priv)) {
  4104. i915_gem_object_unbind(obj_priv->obj);
  4105. if (--nr_to_scan <= 0)
  4106. break;
  4107. }
  4108. }
  4109. spin_lock(&shrink_list_lock);
  4110. mutex_unlock(&dev->struct_mutex);
  4111. would_deadlock = 0;
  4112. if (nr_to_scan <= 0)
  4113. break;
  4114. }
  4115. /* second pass, evict/count anything still on the inactive list */
  4116. list_for_each_entry_safe(dev_priv, next_dev,
  4117. &shrink_list, mm.shrink_list) {
  4118. struct drm_device *dev = dev_priv->dev;
if (!mutex_trylock(&dev->struct_mutex))
  4120. continue;
  4121. spin_unlock(&shrink_list_lock);
  4122. list_for_each_entry_safe(obj_priv, next_obj,
  4123. &dev_priv->mm.inactive_list,
  4124. list) {
  4125. if (nr_to_scan > 0) {
  4126. i915_gem_object_unbind(obj_priv->obj);
  4127. nr_to_scan--;
  4128. } else
  4129. cnt++;
  4130. }
  4131. spin_lock(&shrink_list_lock);
  4132. mutex_unlock(&dev->struct_mutex);
  4133. would_deadlock = 0;
  4134. }
  4135. spin_unlock(&shrink_list_lock);
  4136. if (would_deadlock)
  4137. return -1;
  4138. else if (cnt > 0)
  4139. return (cnt / 100) * sysctl_vfs_cache_pressure;
  4140. else
  4141. return 0;
  4142. }
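/*
 * Illustrative shrinker accounting (editor's sketch, hypothetical
 * numbers): with 250 objects left on the inactive lists and the default
 * sysctl_vfs_cache_pressure of 100, the callback reports
 * (250 / 100) * 100 = 200 freeable units to the VM; if every
 * struct_mutex was contended it returns -1 instead, telling the core
 * shrinker to skip us rather than risk a deadlock.
 */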
  4143. static struct shrinker shrinker = {
  4144. .shrink = i915_gem_shrink,
  4145. .seeks = DEFAULT_SEEKS,
  4146. };
  4147. __init void
  4148. i915_gem_shrinker_init(void)
  4149. {
  4150. register_shrinker(&shrinker);
  4151. }
  4152. __exit void
  4153. i915_gem_shrinker_exit(void)
  4154. {
  4155. unregister_shrinker(&shrinker);
  4156. }