i915_gem.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev, int min_size);
static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
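/*
 * Illustrative sketch (not part of the driver): a minimal userspace caller of
 * the create ioctl above, assuming an already-opened DRM fd and libdrm's
 * drmIoctl() wrapper.  The requested size is rounded up to a whole page by the
 * kernel, and the returned handle is what the pread/pwrite/mmap ioctls below
 * operate on.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
 *		err(1, "GEM create failed");
 *	// create.handle now names the new buffer object
 */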
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}
static inline int
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_vaddr == NULL)
		return -ENOMEM;

	src_vaddr = kmap_atomic(src_page, KM_USER1);
	if (src_vaddr == NULL) {
		kunmap_atomic(dst_vaddr, KM_USER0);
		return -ENOMEM;
	}

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);
	kunmap_atomic(dst_vaddr, KM_USER0);

	return 0;
}
static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
	if (gpu_vaddr == NULL)
		return -ENOMEM;

	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
	if (cpu_vaddr == NULL) {
		kunmap_atomic(gpu_vaddr, KM_USER0);
		return -ENOMEM;
	}

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap_atomic(cpu_vaddr, KM_USER1);
	kunmap_atomic(gpu_vaddr, KM_USER0);

	return 0;
}
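/*
 * Illustrative sketch (not driver code; the helper name is hypothetical):
 * the offset arithmetic used above, written as a pure function.  When bit 17
 * of the page's physical address is set, XORing the byte offset with 64
 * swaps the two 64-byte halves of each 128-byte span, which is how the CPU
 * copy compensates for the hardware's bit-6-with-bit-17 address swizzle.
 *
 *	static unsigned int swizzled_offset(unsigned long phys, unsigned int offset)
 *	{
 *		return (phys & (1 << 17)) ? (offset ^ 64) : offset;
 *	}
 *
 * e.g. on an affected page, offsets 0..63 map to 64..127 and vice versa.
 */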
/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static inline gfp_t
i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
{
	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
}

static inline void
i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
{
	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
}

static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;
		gfp_t gfp;

		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		gfp = i915_gem_object_get_page_gfp_mask(obj);
		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
		ret = i915_gem_object_get_pages(obj);
		i915_gem_object_set_page_gfp_mask (obj, gfp);
	}

	return ret;
}
/**
 * This is the fallback shmem pread path, which uses get_user_pages to pin
 * the user pages first, so we can copy out of the object's backing pages
 * with kmap_atomic while holding the struct_mutex and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    1);
		} else {
			ret = slow_shmem_copy(user_pages[data_page_index],
					      data_page_offset,
					      obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference(obj);

	return ret;
}
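/*
 * Illustrative sketch (not part of the driver): reading a buffer object back
 * from userspace through the pread ioctl above.  Assumes "fd" is an open DRM
 * fd and "handle" came from DRM_IOCTL_I915_GEM_CREATE, as in the earlier
 * sketch.
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
 *		err(1, "GEM pread failed");
 */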
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}
/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
				       page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
					gtt_page_base, gtt_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto out_unpin_object;

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    0);
		} else {
			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	mutex_lock(&dev->struct_mutex);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			list_move_tail(&obj_priv->fence_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
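/*
 * Illustrative sketch (not part of the driver): how userspace typically calls
 * the set_domain ioctl above before touching an object with the CPU, so the
 * kernel can flush or invalidate caches as needed.  Assumes "fd" and "handle"
 * as in the earlier sketches.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle       = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
 *		err(1, "set_domain failed");
 */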
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
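/*
 * Illustrative sketch (not part of the driver): the userspace side of the
 * mmap ioctl above.  The kernel performs the mmap on the object's shmem file
 * and returns the CPU address in addr_ptr.  Assumes "fd" and "handle" as in
 * the earlier sketches.
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size   = 4096,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
 *		err(1, "GEM mmap failed");
 *	void *ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 */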
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
  1004. int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1005. {
  1006. struct drm_gem_object *obj = vma->vm_private_data;
  1007. struct drm_device *dev = obj->dev;
  1008. struct drm_i915_private *dev_priv = dev->dev_private;
  1009. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1010. pgoff_t page_offset;
  1011. unsigned long pfn;
  1012. int ret = 0;
  1013. bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
  1014. /* We don't use vmf->pgoff since that has the fake offset */
  1015. page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
  1016. PAGE_SHIFT;
  1017. /* Now bind it into the GTT if needed */
  1018. mutex_lock(&dev->struct_mutex);
  1019. if (!obj_priv->gtt_space) {
  1020. ret = i915_gem_object_bind_to_gtt(obj, 0);
  1021. if (ret) {
  1022. mutex_unlock(&dev->struct_mutex);
  1023. return VM_FAULT_SIGBUS;
  1024. }
  1025. list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  1026. ret = i915_gem_object_set_to_gtt_domain(obj, write);
  1027. if (ret) {
  1028. mutex_unlock(&dev->struct_mutex);
  1029. return VM_FAULT_SIGBUS;
  1030. }
  1031. }
  1032. /* Need a new fence register? */
  1033. if (obj_priv->tiling_mode != I915_TILING_NONE) {
  1034. ret = i915_gem_object_get_fence_reg(obj);
  1035. if (ret) {
  1036. mutex_unlock(&dev->struct_mutex);
  1037. return VM_FAULT_SIGBUS;
  1038. }
  1039. }
  1040. pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
  1041. page_offset;
  1042. /* Finally, remap it using the new GTT offset */
  1043. ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
  1044. mutex_unlock(&dev->struct_mutex);
  1045. switch (ret) {
  1046. case -ENOMEM:
  1047. case -EAGAIN:
  1048. return VM_FAULT_OOM;
  1049. case -EFAULT:
  1050. case -EINVAL:
  1051. return VM_FAULT_SIGBUS;
  1052. default:
  1053. return VM_FAULT_NOPAGE;
  1054. }
  1055. }
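/*
 * Worked example of the address math above (illustrative numbers only):
 * with vma->vm_start = 0x7f0000000000, a fault at virtual_address =
 * 0x7f0000003000 gives page_offset = 3.  If the aperture sits at
 * dev->agp->base = 0xd0000000 and the object is bound at gtt_offset =
 * 0x00100000, then pfn = (0xd0100000 >> PAGE_SHIFT) + 3, i.e. the fourth
 * page of the object inside the GTT aperture, which vm_insert_pfn() maps
 * into the faulting process.
 */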
  1056. /**
  1057. * i915_gem_create_mmap_offset - create a fake mmap offset for an object
  1058. * @obj: obj in question
  1059. *
  1060. * GEM memory mapping works by handing back to userspace a fake mmap offset
  1061. * it can use in a subsequent mmap(2) call. The DRM core code then looks
  1062. * up the object based on the offset and sets up the various memory mapping
  1063. * structures.
  1064. *
  1065. * This routine allocates and attaches a fake offset for @obj.
  1066. */
  1067. static int
  1068. i915_gem_create_mmap_offset(struct drm_gem_object *obj)
  1069. {
  1070. struct drm_device *dev = obj->dev;
  1071. struct drm_gem_mm *mm = dev->mm_private;
  1072. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1073. struct drm_map_list *list;
  1074. struct drm_local_map *map;
  1075. int ret = 0;
  1076. /* Set the object up for mmap'ing */
  1077. list = &obj->map_list;
  1078. list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
  1079. if (!list->map)
  1080. return -ENOMEM;
  1081. map = list->map;
  1082. map->type = _DRM_GEM;
  1083. map->size = obj->size;
  1084. map->handle = obj;
  1085. /* Get a DRM GEM mmap offset allocated... */
  1086. list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
  1087. obj->size / PAGE_SIZE, 0, 0);
  1088. if (!list->file_offset_node) {
  1089. DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
  1090. ret = -ENOMEM;
  1091. goto out_free_list;
  1092. }
  1093. list->file_offset_node = drm_mm_get_block(list->file_offset_node,
  1094. obj->size / PAGE_SIZE, 0);
  1095. if (!list->file_offset_node) {
  1096. ret = -ENOMEM;
  1097. goto out_free_list;
  1098. }
  1099. list->hash.key = list->file_offset_node->start;
  1100. if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1101. DRM_ERROR("failed to add to map hash\n");
ret = -ENOMEM; /* was left at 0 here, reporting the failure as success */
1102. goto out_free_mm;
  1103. }
  1104. /* By now we should be all set, any drm_mmap request on the offset
  1105. * below will get to our mmap & fault handler */
  1106. obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
  1107. return 0;
  1108. out_free_mm:
  1109. drm_mm_put_block(list->file_offset_node);
  1110. out_free_list:
  1111. kfree(list->map);
  1112. return ret;
  1113. }
  1114. /**
  1115. * i915_gem_release_mmap - remove physical page mappings
  1116. * @obj: obj in question
  1117. *
1118. Preserve the reservation of the mmapping with the DRM core code, but
  1119. * relinquish ownership of the pages back to the system.
  1120. *
  1121. * It is vital that we remove the page mapping if we have mapped a tiled
  1122. * object through the GTT and then lose the fence register due to
  1123. * resource pressure. Similarly if the object has been moved out of the
1124. aperture, then pages mapped into userspace must be revoked. Removing the
  1125. * mapping will then trigger a page fault on the next user access, allowing
  1126. * fixup by i915_gem_fault().
  1127. */
  1128. void
  1129. i915_gem_release_mmap(struct drm_gem_object *obj)
  1130. {
  1131. struct drm_device *dev = obj->dev;
  1132. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1133. if (dev->dev_mapping)
  1134. unmap_mapping_range(dev->dev_mapping,
  1135. obj_priv->mmap_offset, obj->size, 1);
  1136. }
  1137. static void
  1138. i915_gem_free_mmap_offset(struct drm_gem_object *obj)
  1139. {
  1140. struct drm_device *dev = obj->dev;
  1141. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1142. struct drm_gem_mm *mm = dev->mm_private;
  1143. struct drm_map_list *list;
  1144. list = &obj->map_list;
  1145. drm_ht_remove_item(&mm->offset_hash, &list->hash);
  1146. if (list->file_offset_node) {
  1147. drm_mm_put_block(list->file_offset_node);
  1148. list->file_offset_node = NULL;
  1149. }
  1150. if (list->map) {
  1151. kfree(list->map);
  1152. list->map = NULL;
  1153. }
  1154. obj_priv->mmap_offset = 0;
  1155. }
  1156. /**
  1157. * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  1158. * @obj: object to check
  1159. *
  1160. * Return the required GTT alignment for an object, taking into account
  1161. * potential fence register mapping if needed.
  1162. */
  1163. static uint32_t
  1164. i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
  1165. {
  1166. struct drm_device *dev = obj->dev;
  1167. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1168. int start, i;
  1169. /*
  1170. * Minimum alignment is 4k (GTT page size), but might be greater
  1171. * if a fence register is needed for the object.
  1172. */
  1173. if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
  1174. return 4096;
  1175. /*
  1176. * Previous chips need to be aligned to the size of the smallest
  1177. * fence register that can contain the object.
  1178. */
  1179. if (IS_I9XX(dev))
  1180. start = 1024*1024;
  1181. else
  1182. start = 512*1024;
  1183. for (i = start; i < obj->size; i <<= 1)
  1184. ;
  1185. return i;
  1186. }
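/*
 * Worked example of the rounding loop above: a 3MB tiled object on a
 * pre-965, 9xx-class chip starts at 1MB and doubles until it covers the
 * object (1MB -> 2MB -> 4MB), so it must be bound at a 4MB-aligned GTT
 * offset.  A 300KB tiled object on the same chip never enters the loop and
 * keeps the 1MB minimum (512KB on the older 8xx parts), while on 965+ or
 * for untiled objects the function returns the 4KB GTT page size outright.
 */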
  1187. /**
  1188. * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  1189. * @dev: DRM device
  1190. * @data: GTT mapping ioctl data
  1191. * @file_priv: GEM object info
  1192. *
  1193. * Simply returns the fake offset to userspace so it can mmap it.
  1194. * The mmap call will end up in drm_gem_mmap(), which will set things
  1195. * up so we can get faults in the handler above.
  1196. *
  1197. * The fault handler will take care of binding the object into the GTT
  1198. * (since it may have been evicted to make room for something), allocating
  1199. * a fence register, and mapping the appropriate aperture address into
  1200. * userspace.
  1201. */
  1202. int
  1203. i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
  1204. struct drm_file *file_priv)
  1205. {
  1206. struct drm_i915_gem_mmap_gtt *args = data;
  1207. struct drm_i915_private *dev_priv = dev->dev_private;
  1208. struct drm_gem_object *obj;
  1209. struct drm_i915_gem_object *obj_priv;
  1210. int ret;
  1211. if (!(dev->driver->driver_features & DRIVER_GEM))
  1212. return -ENODEV;
  1213. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  1214. if (obj == NULL)
  1215. return -EBADF;
  1216. mutex_lock(&dev->struct_mutex);
  1217. obj_priv = obj->driver_private;
  1218. if (!obj_priv->mmap_offset) {
  1219. ret = i915_gem_create_mmap_offset(obj);
  1220. if (ret) {
  1221. drm_gem_object_unreference(obj);
  1222. mutex_unlock(&dev->struct_mutex);
  1223. return ret;
  1224. }
  1225. }
  1226. args->offset = obj_priv->mmap_offset;
  1227. /*
  1228. * Pull it into the GTT so that we have a page list (makes the
  1229. * initial fault faster and any subsequent flushing possible).
  1230. */
  1231. if (!obj_priv->agp_mem) {
  1232. ret = i915_gem_object_bind_to_gtt(obj, 0);
  1233. if (ret) {
  1234. drm_gem_object_unreference(obj);
  1235. mutex_unlock(&dev->struct_mutex);
  1236. return ret;
  1237. }
  1238. list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  1239. }
  1240. drm_gem_object_unreference(obj);
  1241. mutex_unlock(&dev->struct_mutex);
  1242. return 0;
  1243. }
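/*
 * Illustrative userspace-side sketch (not part of the driver) of the
 * fake-offset flow this ioctl enables.  As above, struct and ioctl names
 * (struct drm_i915_gem_mmap_gtt, DRM_IOCTL_I915_GEM_MMAP_GTT, drmIoctl())
 * are assumed from libdrm/i915_drm.h.
 */
#if 0
	struct drm_i915_gem_mmap_gtt arg = { 0 };
	void *gtt_ptr;

	arg.handle = handle;
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return -errno;
	/* arg.offset is the fake offset from i915_gem_create_mmap_offset() */
	gtt_ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, arg.offset);
#endif
/*
 * The mmap() lands in drm_gem_mmap(), which looks the object up by the fake
 * offset; the first access then faults into i915_gem_fault() above, which
 * binds the object and inserts the aperture PTEs.
 */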
  1244. void
  1245. i915_gem_object_put_pages(struct drm_gem_object *obj)
  1246. {
  1247. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1248. int page_count = obj->size / PAGE_SIZE;
  1249. int i;
  1250. BUG_ON(obj_priv->pages_refcount == 0);
  1251. if (--obj_priv->pages_refcount != 0)
  1252. return;
  1253. if (obj_priv->tiling_mode != I915_TILING_NONE)
  1254. i915_gem_object_save_bit_17_swizzle(obj);
  1255. if (obj_priv->madv == I915_MADV_DONTNEED)
  1256. obj_priv->dirty = 0;
  1257. for (i = 0; i < page_count; i++) {
  1258. if (obj_priv->pages[i] == NULL)
  1259. break;
  1260. if (obj_priv->dirty)
  1261. set_page_dirty(obj_priv->pages[i]);
  1262. if (obj_priv->madv == I915_MADV_WILLNEED)
  1263. mark_page_accessed(obj_priv->pages[i]);
  1264. page_cache_release(obj_priv->pages[i]);
  1265. }
  1266. obj_priv->dirty = 0;
  1267. drm_free_large(obj_priv->pages);
  1268. obj_priv->pages = NULL;
  1269. }
  1270. static void
  1271. i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
  1272. {
  1273. struct drm_device *dev = obj->dev;
  1274. drm_i915_private_t *dev_priv = dev->dev_private;
  1275. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1276. /* Add a reference if we're newly entering the active list. */
  1277. if (!obj_priv->active) {
  1278. drm_gem_object_reference(obj);
  1279. obj_priv->active = 1;
  1280. }
  1281. /* Move from whatever list we were on to the tail of execution. */
  1282. spin_lock(&dev_priv->mm.active_list_lock);
  1283. list_move_tail(&obj_priv->list,
  1284. &dev_priv->mm.active_list);
  1285. spin_unlock(&dev_priv->mm.active_list_lock);
  1286. obj_priv->last_rendering_seqno = seqno;
  1287. }
  1288. static void
  1289. i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
  1290. {
  1291. struct drm_device *dev = obj->dev;
  1292. drm_i915_private_t *dev_priv = dev->dev_private;
  1293. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1294. BUG_ON(!obj_priv->active);
  1295. list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
  1296. obj_priv->last_rendering_seqno = 0;
  1297. }
  1298. static void
  1299. i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
  1300. {
  1301. struct drm_device *dev = obj->dev;
  1302. drm_i915_private_t *dev_priv = dev->dev_private;
  1303. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1304. i915_verify_inactive(dev, __FILE__, __LINE__);
  1305. if (obj_priv->pin_count != 0)
  1306. list_del_init(&obj_priv->list);
  1307. else
  1308. list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  1309. obj_priv->last_rendering_seqno = 0;
  1310. if (obj_priv->active) {
  1311. obj_priv->active = 0;
  1312. drm_gem_object_unreference(obj);
  1313. }
  1314. i915_verify_inactive(dev, __FILE__, __LINE__);
  1315. }
  1316. /**
  1317. * Creates a new sequence number, emitting a write of it to the status page
  1318. * plus an interrupt, which will trigger i915_user_interrupt_handler.
  1319. *
1320. Must be called with dev->struct_mutex held.
  1321. *
  1322. * Returned sequence numbers are nonzero on success.
  1323. */
  1324. static uint32_t
  1325. i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  1326. uint32_t flush_domains)
  1327. {
  1328. drm_i915_private_t *dev_priv = dev->dev_private;
  1329. struct drm_i915_file_private *i915_file_priv = NULL;
  1330. struct drm_i915_gem_request *request;
  1331. uint32_t seqno;
  1332. int was_empty;
  1333. RING_LOCALS;
  1334. if (file_priv != NULL)
  1335. i915_file_priv = file_priv->driver_priv;
  1336. request = kzalloc(sizeof(*request), GFP_KERNEL);
  1337. if (request == NULL)
  1338. return 0;
  1339. /* Grab the seqno we're going to make this request be, and bump the
  1340. * next (skipping 0 so it can be the reserved no-seqno value).
  1341. */
  1342. seqno = dev_priv->mm.next_gem_seqno;
  1343. dev_priv->mm.next_gem_seqno++;
  1344. if (dev_priv->mm.next_gem_seqno == 0)
  1345. dev_priv->mm.next_gem_seqno++;
  1346. BEGIN_LP_RING(4);
  1347. OUT_RING(MI_STORE_DWORD_INDEX);
  1348. OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
  1349. OUT_RING(seqno);
  1350. OUT_RING(MI_USER_INTERRUPT);
  1351. ADVANCE_LP_RING();
  1352. DRM_DEBUG("%d\n", seqno);
  1353. request->seqno = seqno;
  1354. request->emitted_jiffies = jiffies;
  1355. was_empty = list_empty(&dev_priv->mm.request_list);
  1356. list_add_tail(&request->list, &dev_priv->mm.request_list);
  1357. if (i915_file_priv) {
  1358. list_add_tail(&request->client_list,
  1359. &i915_file_priv->mm.request_list);
  1360. } else {
  1361. INIT_LIST_HEAD(&request->client_list);
  1362. }
  1363. /* Associate any objects on the flushing list matching the write
  1364. * domain we're flushing with our flush.
  1365. */
  1366. if (flush_domains != 0) {
  1367. struct drm_i915_gem_object *obj_priv, *next;
  1368. list_for_each_entry_safe(obj_priv, next,
  1369. &dev_priv->mm.flushing_list, list) {
  1370. struct drm_gem_object *obj = obj_priv->obj;
  1371. if ((obj->write_domain & flush_domains) ==
  1372. obj->write_domain) {
  1373. uint32_t old_write_domain = obj->write_domain;
  1374. obj->write_domain = 0;
  1375. i915_gem_object_move_to_active(obj, seqno);
  1376. trace_i915_gem_object_change_domain(obj,
  1377. obj->read_domains,
  1378. old_write_domain);
  1379. }
  1380. }
  1381. }
  1382. if (!dev_priv->mm.suspended) {
  1383. mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
  1384. if (was_empty)
  1385. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
  1386. }
  1387. return seqno;
  1388. }
  1389. /**
  1390. * Command execution barrier
  1391. *
  1392. * Ensures that all commands in the ring are finished
  1393. * before signalling the CPU
  1394. */
  1395. static uint32_t
  1396. i915_retire_commands(struct drm_device *dev)
  1397. {
  1398. drm_i915_private_t *dev_priv = dev->dev_private;
  1399. uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
  1400. uint32_t flush_domains = 0;
  1401. RING_LOCALS;
  1402. /* The sampler always gets flushed on i965 (sigh) */
  1403. if (IS_I965G(dev))
  1404. flush_domains |= I915_GEM_DOMAIN_SAMPLER;
  1405. BEGIN_LP_RING(2);
  1406. OUT_RING(cmd);
  1407. OUT_RING(0); /* noop */
  1408. ADVANCE_LP_RING();
  1409. return flush_domains;
  1410. }
  1411. /**
  1412. * Moves buffers associated only with the given active seqno from the active
  1413. * to inactive list, potentially freeing them.
  1414. */
  1415. static void
  1416. i915_gem_retire_request(struct drm_device *dev,
  1417. struct drm_i915_gem_request *request)
  1418. {
  1419. drm_i915_private_t *dev_priv = dev->dev_private;
  1420. trace_i915_gem_request_retire(dev, request->seqno);
  1421. /* Move any buffers on the active list that are no longer referenced
  1422. * by the ringbuffer to the flushing/inactive lists as appropriate.
  1423. */
  1424. spin_lock(&dev_priv->mm.active_list_lock);
  1425. while (!list_empty(&dev_priv->mm.active_list)) {
  1426. struct drm_gem_object *obj;
  1427. struct drm_i915_gem_object *obj_priv;
  1428. obj_priv = list_first_entry(&dev_priv->mm.active_list,
  1429. struct drm_i915_gem_object,
  1430. list);
  1431. obj = obj_priv->obj;
  1432. /* If the seqno being retired doesn't match the oldest in the
  1433. * list, then the oldest in the list must still be newer than
  1434. * this seqno.
  1435. */
  1436. if (obj_priv->last_rendering_seqno != request->seqno)
  1437. goto out;
  1438. #if WATCH_LRU
  1439. DRM_INFO("%s: retire %d moves to inactive list %p\n",
  1440. __func__, request->seqno, obj);
  1441. #endif
  1442. if (obj->write_domain != 0)
  1443. i915_gem_object_move_to_flushing(obj);
  1444. else {
  1445. /* Take a reference on the object so it won't be
  1446. * freed while the spinlock is held. The list
  1447. * protection for this spinlock is safe when breaking
  1448. * the lock like this since the next thing we do
  1449. * is just get the head of the list again.
  1450. */
  1451. drm_gem_object_reference(obj);
  1452. i915_gem_object_move_to_inactive(obj);
  1453. spin_unlock(&dev_priv->mm.active_list_lock);
  1454. drm_gem_object_unreference(obj);
  1455. spin_lock(&dev_priv->mm.active_list_lock);
  1456. }
  1457. }
  1458. out:
  1459. spin_unlock(&dev_priv->mm.active_list_lock);
  1460. }
  1461. /**
  1462. * Returns true if seq1 is later than seq2.
  1463. */
  1464. bool
  1465. i915_seqno_passed(uint32_t seq1, uint32_t seq2)
  1466. {
  1467. return (int32_t)(seq1 - seq2) >= 0;
  1468. }
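/*
 * Worked example of the wrap-safe comparison above: with seq1 = 0x00000003
 * and seq2 = 0xfffffffe the unsigned difference is 0x00000005, positive as
 * an int32_t, so seq1 is correctly treated as later even though the counter
 * has wrapped.  Conversely seq1 = 0xfffffffe, seq2 = 0x00000003 gives
 * 0xfffffffb (negative), i.e. "not yet passed".  The comparison only stays
 * meaningful while outstanding requests span fewer than 2^31 seqnos.
 */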
  1469. uint32_t
  1470. i915_get_gem_seqno(struct drm_device *dev)
  1471. {
  1472. drm_i915_private_t *dev_priv = dev->dev_private;
  1473. return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
  1474. }
  1475. /**
  1476. * This function clears the request list as sequence numbers are passed.
  1477. */
  1478. void
  1479. i915_gem_retire_requests(struct drm_device *dev)
  1480. {
  1481. drm_i915_private_t *dev_priv = dev->dev_private;
  1482. uint32_t seqno;
  1483. if (!dev_priv->hw_status_page)
  1484. return;
  1485. seqno = i915_get_gem_seqno(dev);
  1486. while (!list_empty(&dev_priv->mm.request_list)) {
  1487. struct drm_i915_gem_request *request;
  1488. uint32_t retiring_seqno;
  1489. request = list_first_entry(&dev_priv->mm.request_list,
  1490. struct drm_i915_gem_request,
  1491. list);
  1492. retiring_seqno = request->seqno;
  1493. if (i915_seqno_passed(seqno, retiring_seqno) ||
  1494. atomic_read(&dev_priv->mm.wedged)) {
  1495. i915_gem_retire_request(dev, request);
  1496. list_del(&request->list);
  1497. list_del(&request->client_list);
  1498. kfree(request);
  1499. } else
  1500. break;
  1501. }
  1502. }
  1503. void
  1504. i915_gem_retire_work_handler(struct work_struct *work)
  1505. {
  1506. drm_i915_private_t *dev_priv;
  1507. struct drm_device *dev;
  1508. dev_priv = container_of(work, drm_i915_private_t,
  1509. mm.retire_work.work);
  1510. dev = dev_priv->dev;
  1511. mutex_lock(&dev->struct_mutex);
  1512. i915_gem_retire_requests(dev);
  1513. if (!dev_priv->mm.suspended &&
  1514. !list_empty(&dev_priv->mm.request_list))
  1515. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
  1516. mutex_unlock(&dev->struct_mutex);
  1517. }
  1518. /**
  1519. * Waits for a sequence number to be signaled, and cleans up the
  1520. * request and object lists appropriately for that event.
  1521. */
  1522. static int
  1523. i915_wait_request(struct drm_device *dev, uint32_t seqno)
  1524. {
  1525. drm_i915_private_t *dev_priv = dev->dev_private;
  1526. u32 ier;
  1527. int ret = 0;
  1528. BUG_ON(seqno == 0);
  1529. if (atomic_read(&dev_priv->mm.wedged))
  1530. return -EIO;
  1531. if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
  1532. if (IS_IGDNG(dev))
  1533. ier = I915_READ(DEIER) | I915_READ(GTIER);
  1534. else
  1535. ier = I915_READ(IER);
  1536. if (!ier) {
  1537. DRM_ERROR("something (likely vbetool) disabled "
  1538. "interrupts, re-enabling\n");
  1539. i915_driver_irq_preinstall(dev);
  1540. i915_driver_irq_postinstall(dev);
  1541. }
  1542. trace_i915_gem_request_wait_begin(dev, seqno);
  1543. dev_priv->mm.waiting_gem_seqno = seqno;
  1544. i915_user_irq_get(dev);
  1545. ret = wait_event_interruptible(dev_priv->irq_queue,
  1546. i915_seqno_passed(i915_get_gem_seqno(dev),
  1547. seqno) ||
  1548. atomic_read(&dev_priv->mm.wedged));
  1549. i915_user_irq_put(dev);
  1550. dev_priv->mm.waiting_gem_seqno = 0;
  1551. trace_i915_gem_request_wait_end(dev, seqno);
  1552. }
  1553. if (atomic_read(&dev_priv->mm.wedged))
  1554. ret = -EIO;
  1555. if (ret && ret != -ERESTARTSYS)
  1556. DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
  1557. __func__, ret, seqno, i915_get_gem_seqno(dev));
  1558. /* Directly dispatch request retiring. While we have the work queue
  1559. * to handle this, the waiter on a request often wants an associated
  1560. * buffer to have made it to the inactive list, and we would need
  1561. * a separate wait queue to handle that.
  1562. */
  1563. if (ret == 0)
  1564. i915_gem_retire_requests(dev);
  1565. return ret;
  1566. }
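/*
 * Typical caller idiom, sketched for reference (it mirrors what
 * i915_gem_evict_everything() below does): emit any required flush, turn it
 * into a request, then block on that seqno, all under dev->struct_mutex per
 * the locking comment on i915_add_request():
 *
 *	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 *	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
 *	if (seqno == 0)
 *		return -ENOMEM;
 *	ret = i915_wait_request(dev, seqno);
 */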
  1567. static void
  1568. i915_gem_flush(struct drm_device *dev,
  1569. uint32_t invalidate_domains,
  1570. uint32_t flush_domains)
  1571. {
  1572. drm_i915_private_t *dev_priv = dev->dev_private;
  1573. uint32_t cmd;
  1574. RING_LOCALS;
  1575. #if WATCH_EXEC
  1576. DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
  1577. invalidate_domains, flush_domains);
  1578. #endif
  1579. trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
  1580. invalidate_domains, flush_domains);
  1581. if (flush_domains & I915_GEM_DOMAIN_CPU)
  1582. drm_agp_chipset_flush(dev);
  1583. if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
  1584. /*
  1585. * read/write caches:
  1586. *
  1587. * I915_GEM_DOMAIN_RENDER is always invalidated, but is
  1588. * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
  1589. * also flushed at 2d versus 3d pipeline switches.
  1590. *
  1591. * read-only caches:
  1592. *
  1593. * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
  1594. * MI_READ_FLUSH is set, and is always flushed on 965.
  1595. *
  1596. * I915_GEM_DOMAIN_COMMAND may not exist?
  1597. *
  1598. * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
  1599. * invalidated when MI_EXE_FLUSH is set.
  1600. *
  1601. * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
  1602. * invalidated with every MI_FLUSH.
  1603. *
  1604. * TLBs:
  1605. *
  1606. * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1607. and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
  1608. * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
  1609. * are flushed at any MI_FLUSH.
  1610. */
  1611. cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
  1612. if ((invalidate_domains|flush_domains) &
  1613. I915_GEM_DOMAIN_RENDER)
  1614. cmd &= ~MI_NO_WRITE_FLUSH;
  1615. if (!IS_I965G(dev)) {
  1616. /*
  1617. * On the 965, the sampler cache always gets flushed
  1618. * and this bit is reserved.
  1619. */
  1620. if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
  1621. cmd |= MI_READ_FLUSH;
  1622. }
  1623. if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
  1624. cmd |= MI_EXE_FLUSH;
  1625. #if WATCH_EXEC
  1626. DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
  1627. #endif
  1628. BEGIN_LP_RING(2);
  1629. OUT_RING(cmd);
  1630. OUT_RING(0); /* noop */
  1631. ADVANCE_LP_RING();
  1632. }
  1633. }
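/*
 * Worked example of the cmd construction above: invalidating
 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER on a pre-965 chip
 * starts from MI_FLUSH | MI_NO_WRITE_FLUSH, clears MI_NO_WRITE_FLUSH
 * because the render cache is involved, and ORs in MI_READ_FLUSH for the
 * sampler, giving MI_FLUSH | MI_READ_FLUSH.  On 965 the sampler bit is
 * reserved and left out, and an INSTRUCTION-domain invalidate would add
 * MI_EXE_FLUSH instead.
 */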
  1634. /**
  1635. * Ensures that all rendering to the object has completed and the object is
  1636. * safe to unbind from the GTT or access from the CPU.
  1637. */
  1638. static int
  1639. i915_gem_object_wait_rendering(struct drm_gem_object *obj)
  1640. {
  1641. struct drm_device *dev = obj->dev;
  1642. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1643. int ret;
  1644. /* This function only exists to support waiting for existing rendering,
  1645. * not for emitting required flushes.
  1646. */
  1647. BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
  1648. /* If there is rendering queued on the buffer being evicted, wait for
  1649. * it.
  1650. */
  1651. if (obj_priv->active) {
  1652. #if WATCH_BUF
  1653. DRM_INFO("%s: object %p wait for seqno %08x\n",
  1654. __func__, obj, obj_priv->last_rendering_seqno);
  1655. #endif
  1656. ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
  1657. if (ret != 0)
  1658. return ret;
  1659. }
  1660. return 0;
  1661. }
  1662. /**
  1663. * Unbinds an object from the GTT aperture.
  1664. */
  1665. int
  1666. i915_gem_object_unbind(struct drm_gem_object *obj)
  1667. {
  1668. struct drm_device *dev = obj->dev;
  1669. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1670. int ret = 0;
  1671. #if WATCH_BUF
  1672. DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
  1673. DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
  1674. #endif
  1675. if (obj_priv->gtt_space == NULL)
  1676. return 0;
  1677. if (obj_priv->pin_count != 0) {
  1678. DRM_ERROR("Attempting to unbind pinned buffer\n");
  1679. return -EINVAL;
  1680. }
  1681. /* blow away mappings if mapped through GTT */
  1682. i915_gem_release_mmap(obj);
  1683. if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
  1684. i915_gem_clear_fence_reg(obj);
  1685. /* Move the object to the CPU domain to ensure that
  1686. * any possible CPU writes while it's not in the GTT
  1687. * are flushed when we go to remap it. This will
  1688. * also ensure that all pending GPU writes are finished
  1689. * before we unbind.
  1690. */
  1691. ret = i915_gem_object_set_to_cpu_domain(obj, 1);
  1692. if (ret) {
  1693. if (ret != -ERESTARTSYS)
  1694. DRM_ERROR("set_domain failed: %d\n", ret);
  1695. return ret;
  1696. }
  1697. BUG_ON(obj_priv->active);
  1698. if (obj_priv->agp_mem != NULL) {
  1699. drm_unbind_agp(obj_priv->agp_mem);
  1700. drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
  1701. obj_priv->agp_mem = NULL;
  1702. }
  1703. i915_gem_object_put_pages(obj);
  1704. BUG_ON(obj_priv->pages_refcount);
  1705. if (obj_priv->gtt_space) {
  1706. atomic_dec(&dev->gtt_count);
  1707. atomic_sub(obj->size, &dev->gtt_memory);
  1708. drm_mm_put_block(obj_priv->gtt_space);
  1709. obj_priv->gtt_space = NULL;
  1710. }
  1711. /* Remove ourselves from the LRU list if present. */
  1712. if (!list_empty(&obj_priv->list))
  1713. list_del_init(&obj_priv->list);
  1714. trace_i915_gem_object_unbind(obj);
  1715. return 0;
  1716. }
  1717. static inline int
  1718. i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
  1719. {
  1720. return !obj_priv->dirty || obj_priv->madv == I915_MADV_DONTNEED;
  1721. }
  1722. static struct drm_gem_object *
  1723. i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
  1724. {
  1725. drm_i915_private_t *dev_priv = dev->dev_private;
  1726. struct drm_i915_gem_object *obj_priv;
  1727. struct drm_gem_object *best = NULL;
  1728. struct drm_gem_object *first = NULL;
  1729. /* Try to find the smallest clean object */
  1730. list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
  1731. struct drm_gem_object *obj = obj_priv->obj;
  1732. if (obj->size >= min_size) {
  1733. if (i915_gem_object_is_purgeable(obj_priv) &&
  1734. (!best || obj->size < best->size)) {
  1735. best = obj;
  1736. if (best->size == min_size)
  1737. return best;
  1738. }
  1739. if (!first)
  1740. first = obj;
  1741. }
  1742. }
  1743. return best ? best : first;
  1744. }
  1745. static int
  1746. i915_gem_evict_everything(struct drm_device *dev)
  1747. {
  1748. drm_i915_private_t *dev_priv = dev->dev_private;
  1749. uint32_t seqno;
  1750. int ret;
  1751. bool lists_empty;
  1752. DRM_INFO("GTT full, evicting everything: "
  1753. "%d objects [%d pinned], "
  1754. "%d object bytes [%d pinned], "
  1755. "%d/%d gtt bytes\n",
  1756. atomic_read(&dev->object_count),
  1757. atomic_read(&dev->pin_count),
  1758. atomic_read(&dev->object_memory),
  1759. atomic_read(&dev->pin_memory),
  1760. atomic_read(&dev->gtt_memory),
  1761. dev->gtt_total);
  1762. spin_lock(&dev_priv->mm.active_list_lock);
  1763. lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
  1764. list_empty(&dev_priv->mm.flushing_list) &&
  1765. list_empty(&dev_priv->mm.active_list));
  1766. spin_unlock(&dev_priv->mm.active_list_lock);
  1767. if (lists_empty) {
  1768. DRM_ERROR("GTT full, but lists empty!\n");
  1769. return -ENOSPC;
  1770. }
  1771. /* Flush everything (on to the inactive lists) and evict */
  1772. i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
  1773. seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
  1774. if (seqno == 0)
  1775. return -ENOMEM;
  1776. ret = i915_wait_request(dev, seqno);
  1777. if (ret)
  1778. return ret;
  1779. ret = i915_gem_evict_from_inactive_list(dev);
  1780. if (ret)
  1781. return ret;
  1782. spin_lock(&dev_priv->mm.active_list_lock);
  1783. lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
  1784. list_empty(&dev_priv->mm.flushing_list) &&
  1785. list_empty(&dev_priv->mm.active_list));
  1786. spin_unlock(&dev_priv->mm.active_list_lock);
  1787. BUG_ON(!lists_empty);
  1788. return 0;
  1789. }
  1790. static int
  1791. i915_gem_evict_something(struct drm_device *dev, int min_size)
  1792. {
  1793. drm_i915_private_t *dev_priv = dev->dev_private;
  1794. struct drm_gem_object *obj;
  1795. int ret;
  1796. for (;;) {
  1797. i915_gem_retire_requests(dev);
  1798. /* If there's an inactive buffer available now, grab it
  1799. * and be done.
  1800. */
  1801. obj = i915_gem_find_inactive_object(dev, min_size);
  1802. if (obj) {
  1803. struct drm_i915_gem_object *obj_priv;
  1804. #if WATCH_LRU
  1805. DRM_INFO("%s: evicting %p\n", __func__, obj);
  1806. #endif
  1807. obj_priv = obj->driver_private;
  1808. BUG_ON(obj_priv->pin_count != 0);
  1809. BUG_ON(obj_priv->active);
  1810. /* Wait on the rendering and unbind the buffer. */
  1811. return i915_gem_object_unbind(obj);
  1812. }
  1813. /* If we didn't get anything, but the ring is still processing
  1814. * things, wait for the next to finish and hopefully leave us
  1815. * a buffer to evict.
  1816. */
  1817. if (!list_empty(&dev_priv->mm.request_list)) {
  1818. struct drm_i915_gem_request *request;
  1819. request = list_first_entry(&dev_priv->mm.request_list,
  1820. struct drm_i915_gem_request,
  1821. list);
  1822. ret = i915_wait_request(dev, request->seqno);
  1823. if (ret)
  1824. return ret;
  1825. continue;
  1826. }
  1827. /* If we didn't have anything on the request list but there
  1828. * are buffers awaiting a flush, emit one and try again.
  1829. * When we wait on it, those buffers waiting for that flush
  1830. * will get moved to inactive.
  1831. */
  1832. if (!list_empty(&dev_priv->mm.flushing_list)) {
  1833. struct drm_i915_gem_object *obj_priv;
  1834. /* Find an object that we can immediately reuse */
  1835. list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
  1836. obj = obj_priv->obj;
  1837. if (obj->size >= min_size)
  1838. break;
  1839. obj = NULL;
  1840. }
  1841. if (obj != NULL) {
  1842. uint32_t seqno;
  1843. i915_gem_flush(dev,
  1844. obj->write_domain,
  1845. obj->write_domain);
  1846. seqno = i915_add_request(dev, NULL, obj->write_domain);
  1847. if (seqno == 0)
  1848. return -ENOMEM;
  1849. ret = i915_wait_request(dev, seqno);
  1850. if (ret)
  1851. return ret;
  1852. continue;
  1853. }
  1854. }
  1855. /* If we didn't do any of the above, there's no single buffer
  1856. * large enough to swap out for the new one, so just evict
  1857. * everything and start again. (This should be rare.)
  1858. */
1859. if (!list_empty(&dev_priv->mm.inactive_list)) {
  1860. DRM_INFO("GTT full, evicting inactive buffers\n");
  1861. return i915_gem_evict_from_inactive_list(dev);
  1862. } else
  1863. return i915_gem_evict_everything(dev);
  1864. }
  1865. }
  1866. int
  1867. i915_gem_object_get_pages(struct drm_gem_object *obj)
  1868. {
  1869. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1870. int page_count, i;
  1871. struct address_space *mapping;
  1872. struct inode *inode;
  1873. struct page *page;
  1874. int ret;
  1875. if (obj_priv->pages_refcount++ != 0)
  1876. return 0;
  1877. /* Get the list of pages out of our struct file. They'll be pinned
  1878. * at this point until we release them.
  1879. */
  1880. page_count = obj->size / PAGE_SIZE;
  1881. BUG_ON(obj_priv->pages != NULL);
  1882. obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
  1883. if (obj_priv->pages == NULL) {
  1884. DRM_ERROR("Failed to allocate page list\n");
  1885. obj_priv->pages_refcount--;
  1886. return -ENOMEM;
  1887. }
  1888. inode = obj->filp->f_path.dentry->d_inode;
  1889. mapping = inode->i_mapping;
  1890. for (i = 0; i < page_count; i++) {
  1891. page = read_mapping_page(mapping, i, NULL);
  1892. if (IS_ERR(page)) {
  1893. ret = PTR_ERR(page);
  1894. i915_gem_object_put_pages(obj);
  1895. return ret;
  1896. }
  1897. obj_priv->pages[i] = page;
  1898. }
  1899. if (obj_priv->tiling_mode != I915_TILING_NONE)
  1900. i915_gem_object_do_bit_17_swizzle(obj);
  1901. return 0;
  1902. }
  1903. static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
  1904. {
  1905. struct drm_gem_object *obj = reg->obj;
  1906. struct drm_device *dev = obj->dev;
  1907. drm_i915_private_t *dev_priv = dev->dev_private;
  1908. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1909. int regnum = obj_priv->fence_reg;
  1910. uint64_t val;
  1911. val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
  1912. 0xfffff000) << 32;
  1913. val |= obj_priv->gtt_offset & 0xfffff000;
  1914. val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
  1915. if (obj_priv->tiling_mode == I915_TILING_Y)
  1916. val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  1917. val |= I965_FENCE_REG_VALID;
  1918. I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
  1919. }
  1920. static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
  1921. {
  1922. struct drm_gem_object *obj = reg->obj;
  1923. struct drm_device *dev = obj->dev;
  1924. drm_i915_private_t *dev_priv = dev->dev_private;
  1925. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1926. int regnum = obj_priv->fence_reg;
  1927. int tile_width;
  1928. uint32_t fence_reg, val;
  1929. uint32_t pitch_val;
  1930. if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
  1931. (obj_priv->gtt_offset & (obj->size - 1))) {
  1932. WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
  1933. __func__, obj_priv->gtt_offset, obj->size);
  1934. return;
  1935. }
  1936. if (obj_priv->tiling_mode == I915_TILING_Y &&
  1937. HAS_128_BYTE_Y_TILING(dev))
  1938. tile_width = 128;
  1939. else
  1940. tile_width = 512;
  1941. /* Note: pitch better be a power of two tile widths */
  1942. pitch_val = obj_priv->stride / tile_width;
  1943. pitch_val = ffs(pitch_val) - 1;
  1944. val = obj_priv->gtt_offset;
  1945. if (obj_priv->tiling_mode == I915_TILING_Y)
  1946. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  1947. val |= I915_FENCE_SIZE_BITS(obj->size);
  1948. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  1949. val |= I830_FENCE_REG_VALID;
  1950. if (regnum < 8)
  1951. fence_reg = FENCE_REG_830_0 + (regnum * 4);
  1952. else
  1953. fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
  1954. I915_WRITE(fence_reg, val);
  1955. }
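/*
 * Worked example of the pitch encoding above: a Y-tiled object with a
 * 2048-byte stride on a chip with 128-byte Y tiles gives pitch_val =
 * 2048 / 128 = 16 tile widths, encoded as ffs(16) - 1 = 4 in the fence
 * register's pitch field.  An X-tiled object with a 4096-byte stride and
 * 512-byte tiles encodes as ffs(8) - 1 = 3.  A stride that is not a
 * power-of-two multiple of the tile width would encode incorrectly, hence
 * the "pitch better be a power of two tile widths" note above.
 */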
  1956. static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
  1957. {
  1958. struct drm_gem_object *obj = reg->obj;
  1959. struct drm_device *dev = obj->dev;
  1960. drm_i915_private_t *dev_priv = dev->dev_private;
  1961. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  1962. int regnum = obj_priv->fence_reg;
  1963. uint32_t val;
  1964. uint32_t pitch_val;
  1965. uint32_t fence_size_bits;
  1966. if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
  1967. (obj_priv->gtt_offset & (obj->size - 1))) {
  1968. WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
  1969. __func__, obj_priv->gtt_offset);
  1970. return;
  1971. }
  1972. pitch_val = obj_priv->stride / 128;
  1973. pitch_val = ffs(pitch_val) - 1;
  1974. WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
  1975. val = obj_priv->gtt_offset;
  1976. if (obj_priv->tiling_mode == I915_TILING_Y)
  1977. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  1978. fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
  1979. WARN_ON(fence_size_bits & ~0x00000f00);
  1980. val |= fence_size_bits;
  1981. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  1982. val |= I830_FENCE_REG_VALID;
  1983. I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
  1984. }
  1985. /**
  1986. * i915_gem_object_get_fence_reg - set up a fence reg for an object
  1987. * @obj: object to map through a fence reg
  1988. *
  1989. * When mapping objects through the GTT, userspace wants to be able to write
  1990. * to them without having to worry about swizzling if the object is tiled.
  1991. *
  1992. * This function walks the fence regs looking for a free one for @obj,
  1993. * stealing one if it can't find any.
  1994. *
  1995. * It then sets up the reg based on the object's properties: address, pitch
  1996. * and tiling format.
  1997. */
  1998. int
  1999. i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
  2000. {
  2001. struct drm_device *dev = obj->dev;
  2002. struct drm_i915_private *dev_priv = dev->dev_private;
  2003. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2004. struct drm_i915_fence_reg *reg = NULL;
  2005. struct drm_i915_gem_object *old_obj_priv = NULL;
  2006. int i, ret, avail;
  2007. /* Just update our place in the LRU if our fence is getting used. */
  2008. if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
  2009. list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
  2010. return 0;
  2011. }
  2012. switch (obj_priv->tiling_mode) {
  2013. case I915_TILING_NONE:
  2014. WARN(1, "allocating a fence for non-tiled object?\n");
  2015. break;
  2016. case I915_TILING_X:
  2017. if (!obj_priv->stride)
  2018. return -EINVAL;
  2019. WARN((obj_priv->stride & (512 - 1)),
  2020. "object 0x%08x is X tiled but has non-512B pitch\n",
  2021. obj_priv->gtt_offset);
  2022. break;
  2023. case I915_TILING_Y:
  2024. if (!obj_priv->stride)
  2025. return -EINVAL;
  2026. WARN((obj_priv->stride & (128 - 1)),
  2027. "object 0x%08x is Y tiled but has non-128B pitch\n",
  2028. obj_priv->gtt_offset);
  2029. break;
  2030. }
  2031. /* First try to find a free reg */
  2032. avail = 0;
  2033. for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
  2034. reg = &dev_priv->fence_regs[i];
  2035. if (!reg->obj)
  2036. break;
  2037. old_obj_priv = reg->obj->driver_private;
  2038. if (!old_obj_priv->pin_count)
  2039. avail++;
  2040. }
  2041. /* None available, try to steal one or wait for a user to finish */
  2042. if (i == dev_priv->num_fence_regs) {
  2043. struct drm_gem_object *old_obj = NULL;
  2044. if (avail == 0)
  2045. return -ENOSPC;
  2046. list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
  2047. fence_list) {
  2048. old_obj = old_obj_priv->obj;
  2049. if (old_obj_priv->pin_count)
  2050. continue;
  2051. /* Take a reference, as otherwise the wait_rendering
  2052. * below may cause the object to get freed out from
  2053. * under us.
  2054. */
  2055. drm_gem_object_reference(old_obj);
  2056. /* i915 uses fences for GPU access to tiled buffers */
  2057. if (IS_I965G(dev) || !old_obj_priv->active)
  2058. break;
  2059. /* This brings the object to the head of the LRU if it
  2060. * had been written to. The only way this should
  2061. * result in us waiting longer than the expected
  2062. * optimal amount of time is if there was a
  2063. * fence-using buffer later that was read-only.
  2064. */
  2065. i915_gem_object_flush_gpu_write_domain(old_obj);
  2066. ret = i915_gem_object_wait_rendering(old_obj);
  2067. if (ret != 0) {
  2068. drm_gem_object_unreference(old_obj);
  2069. return ret;
  2070. }
  2071. break;
  2072. }
  2073. /*
  2074. * Zap this virtual mapping so we can set up a fence again
  2075. * for this object next time we need it.
  2076. */
  2077. i915_gem_release_mmap(old_obj);
  2078. i = old_obj_priv->fence_reg;
  2079. reg = &dev_priv->fence_regs[i];
  2080. old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
  2081. list_del_init(&old_obj_priv->fence_list);
  2082. drm_gem_object_unreference(old_obj);
  2083. }
  2084. obj_priv->fence_reg = i;
  2085. list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
  2086. reg->obj = obj;
  2087. if (IS_I965G(dev))
  2088. i965_write_fence_reg(reg);
  2089. else if (IS_I9XX(dev))
  2090. i915_write_fence_reg(reg);
  2091. else
  2092. i830_write_fence_reg(reg);
  2093. trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
  2094. return 0;
  2095. }
  2096. /**
  2097. * i915_gem_clear_fence_reg - clear out fence register info
  2098. * @obj: object to clear
  2099. *
  2100. * Zeroes out the fence register itself and clears out the associated
  2101. * data structures in dev_priv and obj_priv.
  2102. */
  2103. static void
  2104. i915_gem_clear_fence_reg(struct drm_gem_object *obj)
  2105. {
  2106. struct drm_device *dev = obj->dev;
  2107. drm_i915_private_t *dev_priv = dev->dev_private;
  2108. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2109. if (IS_I965G(dev))
  2110. I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
  2111. else {
  2112. uint32_t fence_reg;
  2113. if (obj_priv->fence_reg < 8)
  2114. fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
  2115. else
  2116. fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
  2117. 8) * 4;
  2118. I915_WRITE(fence_reg, 0);
  2119. }
  2120. dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
  2121. obj_priv->fence_reg = I915_FENCE_REG_NONE;
  2122. list_del_init(&obj_priv->fence_list);
  2123. }
  2124. /**
  2125. * i915_gem_object_put_fence_reg - waits on outstanding fenced access
  2126. * to the buffer to finish, and then resets the fence register.
  2127. * @obj: tiled object holding a fence register.
  2128. *
  2129. * Zeroes out the fence register itself and clears out the associated
  2130. * data structures in dev_priv and obj_priv.
  2131. */
  2132. int
  2133. i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
  2134. {
  2135. struct drm_device *dev = obj->dev;
  2136. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2137. if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
  2138. return 0;
  2139. /* On the i915, GPU access to tiled buffers is via a fence,
  2140. * therefore we must wait for any outstanding access to complete
  2141. * before clearing the fence.
  2142. */
  2143. if (!IS_I965G(dev)) {
  2144. int ret;
  2145. i915_gem_object_flush_gpu_write_domain(obj);
  2146. i915_gem_object_flush_gtt_write_domain(obj);
  2147. ret = i915_gem_object_wait_rendering(obj);
  2148. if (ret != 0)
  2149. return ret;
  2150. }
2151. i915_gem_clear_fence_reg(obj);
  2152. return 0;
  2153. }
  2154. /**
  2155. * Finds free space in the GTT aperture and binds the object there.
  2156. */
  2157. static int
  2158. i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
  2159. {
  2160. struct drm_device *dev = obj->dev;
  2161. drm_i915_private_t *dev_priv = dev->dev_private;
  2162. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2163. struct drm_mm_node *free_space;
  2164. bool retry_alloc = false;
  2165. int ret;
  2166. if (dev_priv->mm.suspended)
  2167. return -EBUSY;
  2168. if (obj_priv->madv == I915_MADV_DONTNEED) {
  2169. DRM_ERROR("Attempting to bind a purgeable object\n");
  2170. return -EINVAL;
  2171. }
  2172. if (alignment == 0)
  2173. alignment = i915_gem_get_gtt_alignment(obj);
  2174. if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
  2175. DRM_ERROR("Invalid object alignment requested %u\n", alignment);
  2176. return -EINVAL;
  2177. }
  2178. search_free:
  2179. free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
  2180. obj->size, alignment, 0);
  2181. if (free_space != NULL) {
  2182. obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
  2183. alignment);
  2184. if (obj_priv->gtt_space != NULL) {
  2185. obj_priv->gtt_space->private = obj;
  2186. obj_priv->gtt_offset = obj_priv->gtt_space->start;
  2187. }
  2188. }
  2189. if (obj_priv->gtt_space == NULL) {
  2190. /* If the gtt is empty and we're still having trouble
  2191. * fitting our object in, we're out of memory.
  2192. */
  2193. #if WATCH_LRU
  2194. DRM_INFO("%s: GTT full, evicting something\n", __func__);
  2195. #endif
  2196. ret = i915_gem_evict_something(dev, obj->size);
  2197. if (ret != 0) {
  2198. if (ret != -ERESTARTSYS)
  2199. DRM_ERROR("Failed to evict a buffer %d\n", ret);
  2200. return ret;
  2201. }
  2202. goto search_free;
  2203. }
  2204. #if WATCH_BUF
  2205. DRM_INFO("Binding object of size %zd at 0x%08x\n",
  2206. obj->size, obj_priv->gtt_offset);
  2207. #endif
  2208. if (retry_alloc) {
2209. i915_gem_object_set_page_gfp_mask(obj,
2210. i915_gem_object_get_page_gfp_mask(obj) & ~__GFP_NORETRY);
  2211. }
  2212. ret = i915_gem_object_get_pages(obj);
  2213. if (retry_alloc) {
2214. i915_gem_object_set_page_gfp_mask(obj,
2215. i915_gem_object_get_page_gfp_mask(obj) | __GFP_NORETRY);
  2216. }
  2217. if (ret) {
  2218. drm_mm_put_block(obj_priv->gtt_space);
  2219. obj_priv->gtt_space = NULL;
  2220. if (ret == -ENOMEM) {
  2221. /* first try to clear up some space from the GTT */
  2222. ret = i915_gem_evict_something(dev, obj->size);
  2223. if (ret) {
  2224. if (ret != -ERESTARTSYS)
  2225. DRM_ERROR("Failed to allocate space for backing pages %d\n", ret);
  2226. /* now try to shrink everyone else */
2227. if (!retry_alloc) {
  2228. retry_alloc = true;
  2229. goto search_free;
  2230. }
  2231. return ret;
  2232. }
  2233. goto search_free;
  2234. }
  2235. return ret;
  2236. }
  2237. /* Create an AGP memory structure pointing at our pages, and bind it
  2238. * into the GTT.
  2239. */
  2240. obj_priv->agp_mem = drm_agp_bind_pages(dev,
  2241. obj_priv->pages,
  2242. obj->size >> PAGE_SHIFT,
  2243. obj_priv->gtt_offset,
  2244. obj_priv->agp_type);
  2245. if (obj_priv->agp_mem == NULL) {
  2246. i915_gem_object_put_pages(obj);
  2247. drm_mm_put_block(obj_priv->gtt_space);
  2248. obj_priv->gtt_space = NULL;
  2249. ret = i915_gem_evict_something(dev, obj->size);
  2250. if (ret) {
  2251. if (ret != -ERESTARTSYS)
  2252. DRM_ERROR("Failed to allocate space to bind AGP: %d\n", ret);
  2253. return ret;
  2254. }
  2255. goto search_free;
  2256. }
  2257. atomic_inc(&dev->gtt_count);
  2258. atomic_add(obj->size, &dev->gtt_memory);
  2259. /* Assert that the object is not currently in any GPU domain. As it
  2260. * wasn't in the GTT, there shouldn't be any way it could have been in
  2261. * a GPU cache
  2262. */
  2263. BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
  2264. BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
  2265. trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
  2266. return 0;
  2267. }
  2268. void
  2269. i915_gem_clflush_object(struct drm_gem_object *obj)
  2270. {
  2271. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2272. /* If we don't have a page list set up, then we're not pinned
  2273. * to GPU, and we can ignore the cache flush because it'll happen
  2274. * again at bind time.
  2275. */
  2276. if (obj_priv->pages == NULL)
  2277. return;
  2278. trace_i915_gem_object_clflush(obj);
  2279. drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
  2280. }
  2281. /** Flushes any GPU write domain for the object if it's dirty. */
  2282. static void
  2283. i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
  2284. {
  2285. struct drm_device *dev = obj->dev;
  2286. uint32_t seqno;
  2287. uint32_t old_write_domain;
  2288. if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
  2289. return;
  2290. /* Queue the GPU write cache flushing we need. */
  2291. old_write_domain = obj->write_domain;
  2292. i915_gem_flush(dev, 0, obj->write_domain);
  2293. seqno = i915_add_request(dev, NULL, obj->write_domain);
  2294. obj->write_domain = 0;
  2295. i915_gem_object_move_to_active(obj, seqno);
  2296. trace_i915_gem_object_change_domain(obj,
  2297. obj->read_domains,
  2298. old_write_domain);
  2299. }
  2300. /** Flushes the GTT write domain for the object if it's dirty. */
  2301. static void
  2302. i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
  2303. {
  2304. uint32_t old_write_domain;
  2305. if (obj->write_domain != I915_GEM_DOMAIN_GTT)
  2306. return;
  2307. /* No actual flushing is required for the GTT write domain. Writes
  2308. * to it immediately go to main memory as far as we know, so there's
  2309. * no chipset flush. It also doesn't land in render cache.
  2310. */
  2311. old_write_domain = obj->write_domain;
  2312. obj->write_domain = 0;
  2313. trace_i915_gem_object_change_domain(obj,
  2314. obj->read_domains,
  2315. old_write_domain);
  2316. }
  2317. /** Flushes the CPU write domain for the object if it's dirty. */
  2318. static void
  2319. i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
  2320. {
  2321. struct drm_device *dev = obj->dev;
  2322. uint32_t old_write_domain;
  2323. if (obj->write_domain != I915_GEM_DOMAIN_CPU)
  2324. return;
  2325. i915_gem_clflush_object(obj);
  2326. drm_agp_chipset_flush(dev);
  2327. old_write_domain = obj->write_domain;
  2328. obj->write_domain = 0;
  2329. trace_i915_gem_object_change_domain(obj,
  2330. obj->read_domains,
  2331. old_write_domain);
  2332. }
  2333. /**
  2334. * Moves a single object to the GTT read, and possibly write domain.
  2335. *
  2336. * This function returns when the move is complete, including waiting on
  2337. * flushes to occur.
  2338. */
  2339. int
  2340. i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  2341. {
  2342. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2343. uint32_t old_write_domain, old_read_domains;
  2344. int ret;
  2345. /* Not valid to be called on unbound objects. */
  2346. if (obj_priv->gtt_space == NULL)
  2347. return -EINVAL;
  2348. i915_gem_object_flush_gpu_write_domain(obj);
  2349. /* Wait on any GPU rendering and flushing to occur. */
  2350. ret = i915_gem_object_wait_rendering(obj);
  2351. if (ret != 0)
  2352. return ret;
  2353. old_write_domain = obj->write_domain;
  2354. old_read_domains = obj->read_domains;
  2355. /* If we're writing through the GTT domain, then CPU and GPU caches
  2356. * will need to be invalidated at next use.
  2357. */
  2358. if (write)
  2359. obj->read_domains &= I915_GEM_DOMAIN_GTT;
  2360. i915_gem_object_flush_cpu_write_domain(obj);
  2361. /* It should now be out of any other write domains, and we can update
  2362. * the domain values for our changes.
  2363. */
  2364. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  2365. obj->read_domains |= I915_GEM_DOMAIN_GTT;
  2366. if (write) {
  2367. obj->write_domain = I915_GEM_DOMAIN_GTT;
  2368. obj_priv->dirty = 1;
  2369. }
  2370. trace_i915_gem_object_change_domain(obj,
  2371. old_read_domains,
  2372. old_write_domain);
  2373. return 0;
  2374. }
  2375. /**
  2376. * Moves a single object to the CPU read, and possibly write domain.
  2377. *
  2378. * This function returns when the move is complete, including waiting on
  2379. * flushes to occur.
  2380. */
  2381. static int
  2382. i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  2383. {
  2384. uint32_t old_write_domain, old_read_domains;
  2385. int ret;
  2386. i915_gem_object_flush_gpu_write_domain(obj);
  2387. /* Wait on any GPU rendering and flushing to occur. */
  2388. ret = i915_gem_object_wait_rendering(obj);
  2389. if (ret != 0)
  2390. return ret;
  2391. i915_gem_object_flush_gtt_write_domain(obj);
  2392. /* If we have a partially-valid cache of the object in the CPU,
  2393. * finish invalidating it and free the per-page flags.
  2394. */
  2395. i915_gem_object_set_to_full_cpu_read_domain(obj);
  2396. old_write_domain = obj->write_domain;
  2397. old_read_domains = obj->read_domains;
  2398. /* Flush the CPU cache if it's still invalid. */
  2399. if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
  2400. i915_gem_clflush_object(obj);
  2401. obj->read_domains |= I915_GEM_DOMAIN_CPU;
  2402. }
  2403. /* It should now be out of any other write domains, and we can update
  2404. * the domain values for our changes.
  2405. */
  2406. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  2407. /* If we're writing through the CPU, then the GPU read domains will
  2408. * need to be invalidated at next use.
  2409. */
  2410. if (write) {
  2411. obj->read_domains &= I915_GEM_DOMAIN_CPU;
  2412. obj->write_domain = I915_GEM_DOMAIN_CPU;
  2413. }
  2414. trace_i915_gem_object_change_domain(obj,
  2415. old_read_domains,
  2416. old_write_domain);
  2417. return 0;
  2418. }
  2419. /*
  2420. * Set the next domain for the specified object. This
2421. may not actually perform the necessary flushing/invalidating though,
  2422. * as that may want to be batched with other set_domain operations
  2423. *
  2424. * This is (we hope) the only really tricky part of gem. The goal
  2425. * is fairly simple -- track which caches hold bits of the object
  2426. * and make sure they remain coherent. A few concrete examples may
  2427. * help to explain how it works. For shorthand, we use the notation
2428. (read_domains, write_domain), e.g. (CPU, CPU) to indicate
  2429. * a pair of read and write domain masks.
  2430. *
  2431. * Case 1: the batch buffer
  2432. *
  2433. * 1. Allocated
  2434. * 2. Written by CPU
  2435. * 3. Mapped to GTT
  2436. * 4. Read by GPU
  2437. * 5. Unmapped from GTT
  2438. * 6. Freed
  2439. *
  2440. * Let's take these a step at a time
  2441. *
  2442. * 1. Allocated
  2443. * Pages allocated from the kernel may still have
  2444. * cache contents, so we set them to (CPU, CPU) always.
  2445. * 2. Written by CPU (using pwrite)
  2446. * The pwrite function calls set_domain (CPU, CPU) and
  2447. * this function does nothing (as nothing changes)
  2448. * 3. Mapped by GTT
  2449. * This function asserts that the object is not
  2450. * currently in any GPU-based read or write domains
  2451. * 4. Read by GPU
  2452. * i915_gem_execbuffer calls set_domain (COMMAND, 0).
  2453. * As write_domain is zero, this function adds in the
  2454. * current read domains (CPU+COMMAND, 0).
  2455. * flush_domains is set to CPU.
  2456. * invalidate_domains is set to COMMAND
  2457. * clflush is run to get data out of the CPU caches
  2458. * then i915_dev_set_domain calls i915_gem_flush to
  2459. * emit an MI_FLUSH and drm_agp_chipset_flush
  2460. * 5. Unmapped from GTT
  2461. * i915_gem_object_unbind calls set_domain (CPU, CPU)
  2462. * flush_domains and invalidate_domains end up both zero
  2463. * so no flushing/invalidating happens
  2464. * 6. Freed
  2465. * yay, done
  2466. *
  2467. * Case 2: The shared render buffer
  2468. *
  2469. * 1. Allocated
  2470. * 2. Mapped to GTT
  2471. * 3. Read/written by GPU
  2472. * 4. set_domain to (CPU,CPU)
  2473. * 5. Read/written by CPU
  2474. * 6. Read/written by GPU
  2475. *
  2476. * 1. Allocated
  2477. * Same as last example, (CPU, CPU)
  2478. * 2. Mapped to GTT
  2479. * Nothing changes (assertions find that it is not in the GPU)
  2480. * 3. Read/written by GPU
  2481. * execbuffer calls set_domain (RENDER, RENDER)
  2482. * flush_domains gets CPU
  2483. * invalidate_domains gets GPU
  2484. * clflush (obj)
  2485. * MI_FLUSH and drm_agp_chipset_flush
  2486. * 4. set_domain (CPU, CPU)
  2487. * flush_domains gets GPU
  2488. * invalidate_domains gets CPU
  2489. * wait_rendering (obj) to make sure all drawing is complete.
  2490. * This will include an MI_FLUSH to get the data from GPU
  2491. * to memory
  2492. * clflush (obj) to invalidate the CPU cache
  2493. * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
  2494. * 5. Read/written by CPU
  2495. * cache lines are loaded and dirtied
2496. 6. Read/written by GPU
  2497. * Same as last GPU access
  2498. *
  2499. * Case 3: The constant buffer
  2500. *
  2501. * 1. Allocated
  2502. * 2. Written by CPU
  2503. * 3. Read by GPU
  2504. * 4. Updated (written) by CPU again
  2505. * 5. Read by GPU
  2506. *
  2507. * 1. Allocated
  2508. * (CPU, CPU)
  2509. * 2. Written by CPU
  2510. * (CPU, CPU)
  2511. * 3. Read by GPU
  2512. * (CPU+RENDER, 0)
  2513. * flush_domains = CPU
  2514. * invalidate_domains = RENDER
  2515. * clflush (obj)
  2516. * MI_FLUSH
  2517. * drm_agp_chipset_flush
  2518. * 4. Updated (written) by CPU again
  2519. * (CPU, CPU)
  2520. * flush_domains = 0 (no previous write domain)
  2521. * invalidate_domains = 0 (no new read domains)
  2522. * 5. Read by GPU
  2523. * (CPU+RENDER, 0)
  2524. * flush_domains = CPU
  2525. * invalidate_domains = RENDER
  2526. * clflush (obj)
  2527. * MI_FLUSH
  2528. * drm_agp_chipset_flush
  2529. */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;
	uint32_t old_read_domains;

	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, obj->pending_read_domains,
		 obj->write_domain, obj->pending_write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |=
			obj->pending_read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		i915_gem_clflush_object(obj);
	}

	old_read_domains = obj->read_domains;

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->pending_write_domain == 0)
		obj->pending_write_domain = obj->write_domain;
	obj->read_domains = obj->pending_read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
#if WATCH_BUF
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
#endif

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);
}
/**
 * Moves the object from a partially valid CPU read domain to a fully
 * valid one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	kfree(obj_priv->page_cpu_valid);
	obj_priv->page_cpu_valid = NULL;
}
/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					   uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t old_read_domains;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
						   GFP_KERNEL);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->pages + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	old_read_domains = obj->read_domains;
	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);

	return 0;
}
  2691. /**
  2692. * Pin an object to the GTT and evaluate the relocations landing in it.
  2693. */
  2694. static int
  2695. i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  2696. struct drm_file *file_priv,
  2697. struct drm_i915_gem_exec_object *entry,
  2698. struct drm_i915_gem_relocation_entry *relocs)
  2699. {
  2700. struct drm_device *dev = obj->dev;
  2701. drm_i915_private_t *dev_priv = dev->dev_private;
  2702. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  2703. int i, ret;
  2704. void __iomem *reloc_page;
  2705. /* Choose the GTT offset for our buffer and put it there. */
  2706. ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
  2707. if (ret)
  2708. return ret;
  2709. entry->offset = obj_priv->gtt_offset;
  2710. /* Apply the relocations, using the GTT aperture to avoid cache
  2711. * flushing requirements.
  2712. */
  2713. for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
  2715. struct drm_gem_object *target_obj;
  2716. struct drm_i915_gem_object *target_obj_priv;
  2717. uint32_t reloc_val, reloc_offset;
  2718. uint32_t __iomem *reloc_entry;
  2719. target_obj = drm_gem_object_lookup(obj->dev, file_priv,
  2720. reloc->target_handle);
  2721. if (target_obj == NULL) {
  2722. i915_gem_object_unpin(obj);
  2723. return -EBADF;
  2724. }
  2725. target_obj_priv = target_obj->driver_private;
  2726. #if WATCH_RELOC
  2727. DRM_INFO("%s: obj %p offset %08x target %d "
  2728. "read %08x write %08x gtt %08x "
  2729. "presumed %08x delta %08x\n",
  2730. __func__,
  2731. obj,
  2732. (int) reloc->offset,
  2733. (int) reloc->target_handle,
  2734. (int) reloc->read_domains,
  2735. (int) reloc->write_domain,
  2736. (int) target_obj_priv->gtt_offset,
  2737. (int) reloc->presumed_offset,
  2738. reloc->delta);
  2739. #endif
  2740. /* The target buffer should have appeared before us in the
  2741. * exec_object list, so it should have a GTT space bound by now.
  2742. */
  2743. if (target_obj_priv->gtt_space == NULL) {
  2744. DRM_ERROR("No GTT space found for object %d\n",
  2745. reloc->target_handle);
  2746. drm_gem_object_unreference(target_obj);
  2747. i915_gem_object_unpin(obj);
  2748. return -EINVAL;
  2749. }
  2750. /* Validate that the target is in a valid r/w GPU domain */
  2751. if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
  2752. reloc->read_domains & I915_GEM_DOMAIN_CPU) {
  2753. DRM_ERROR("reloc with read/write CPU domains: "
  2754. "obj %p target %d offset %d "
  2755. "read %08x write %08x",
  2756. obj, reloc->target_handle,
  2757. (int) reloc->offset,
  2758. reloc->read_domains,
  2759. reloc->write_domain);
  2760. drm_gem_object_unreference(target_obj);
  2761. i915_gem_object_unpin(obj);
  2762. return -EINVAL;
  2763. }
  2764. if (reloc->write_domain && target_obj->pending_write_domain &&
  2765. reloc->write_domain != target_obj->pending_write_domain) {
  2766. DRM_ERROR("Write domain conflict: "
  2767. "obj %p target %d offset %d "
  2768. "new %08x old %08x\n",
  2769. obj, reloc->target_handle,
  2770. (int) reloc->offset,
  2771. reloc->write_domain,
  2772. target_obj->pending_write_domain);
  2773. drm_gem_object_unreference(target_obj);
  2774. i915_gem_object_unpin(obj);
  2775. return -EINVAL;
  2776. }
  2777. target_obj->pending_read_domains |= reloc->read_domains;
  2778. target_obj->pending_write_domain |= reloc->write_domain;
  2779. /* If the relocation already has the right value in it, no
  2780. * more work needs to be done.
  2781. */
  2782. if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
  2783. drm_gem_object_unreference(target_obj);
  2784. continue;
  2785. }
  2786. /* Check that the relocation address is valid... */
  2787. if (reloc->offset > obj->size - 4) {
  2788. DRM_ERROR("Relocation beyond object bounds: "
  2789. "obj %p target %d offset %d size %d.\n",
  2790. obj, reloc->target_handle,
  2791. (int) reloc->offset, (int) obj->size);
  2792. drm_gem_object_unreference(target_obj);
  2793. i915_gem_object_unpin(obj);
  2794. return -EINVAL;
  2795. }
  2796. if (reloc->offset & 3) {
  2797. DRM_ERROR("Relocation not 4-byte aligned: "
  2798. "obj %p target %d offset %d.\n",
  2799. obj, reloc->target_handle,
  2800. (int) reloc->offset);
  2801. drm_gem_object_unreference(target_obj);
  2802. i915_gem_object_unpin(obj);
  2803. return -EINVAL;
  2804. }
  2805. /* and points to somewhere within the target object. */
  2806. if (reloc->delta >= target_obj->size) {
  2807. DRM_ERROR("Relocation beyond target object bounds: "
  2808. "obj %p target %d delta %d size %d.\n",
  2809. obj, reloc->target_handle,
  2810. (int) reloc->delta, (int) target_obj->size);
  2811. drm_gem_object_unreference(target_obj);
  2812. i915_gem_object_unpin(obj);
  2813. return -EINVAL;
  2814. }
  2815. ret = i915_gem_object_set_to_gtt_domain(obj, 1);
  2816. if (ret != 0) {
  2817. drm_gem_object_unreference(target_obj);
  2818. i915_gem_object_unpin(obj);
  2819. return -EINVAL;
  2820. }
  2821. /* Map the page containing the relocation we're going to
  2822. * perform.
  2823. */
  2824. reloc_offset = obj_priv->gtt_offset + reloc->offset;
  2825. reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
  2826. (reloc_offset &
  2827. ~(PAGE_SIZE - 1)));
  2828. reloc_entry = (uint32_t __iomem *)(reloc_page +
  2829. (reloc_offset & (PAGE_SIZE - 1)));
  2830. reloc_val = target_obj_priv->gtt_offset + reloc->delta;
  2831. #if WATCH_BUF
  2832. DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
  2833. obj, (unsigned int) reloc->offset,
  2834. readl(reloc_entry), reloc_val);
  2835. #endif
  2836. writel(reloc_val, reloc_entry);
  2837. io_mapping_unmap_atomic(reloc_page);
  2838. /* The updated presumed offset for this entry will be
  2839. * copied back out to the user.
  2840. */
  2841. reloc->presumed_offset = target_obj_priv->gtt_offset;
  2842. drm_gem_object_unreference(target_obj);
  2843. }
  2844. #if WATCH_BUF
  2845. if (0)
  2846. i915_gem_dump_object(obj, 128, __func__, ~0);
  2847. #endif
  2848. return 0;
  2849. }
  2850. /** Dispatch a batchbuffer to the ring
  2851. */
  2852. static int
  2853. i915_dispatch_gem_execbuffer(struct drm_device *dev,
  2854. struct drm_i915_gem_execbuffer *exec,
  2855. struct drm_clip_rect *cliprects,
  2856. uint64_t exec_offset)
  2857. {
  2858. drm_i915_private_t *dev_priv = dev->dev_private;
  2859. int nbox = exec->num_cliprects;
  2860. int i = 0, count;
  2861. uint32_t exec_start, exec_len;
  2862. RING_LOCALS;
  2863. exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
  2864. exec_len = (uint32_t) exec->batch_len;
  2865. trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
  2866. count = nbox ? nbox : 1;
  2867. for (i = 0; i < count; i++) {
  2868. if (i < nbox) {
  2869. int ret = i915_emit_box(dev, cliprects, i,
  2870. exec->DR1, exec->DR4);
  2871. if (ret)
  2872. return ret;
  2873. }
  2874. if (IS_I830(dev) || IS_845G(dev)) {
  2875. BEGIN_LP_RING(4);
  2876. OUT_RING(MI_BATCH_BUFFER);
  2877. OUT_RING(exec_start | MI_BATCH_NON_SECURE);
  2878. OUT_RING(exec_start + exec_len - 4);
  2879. OUT_RING(0);
  2880. ADVANCE_LP_RING();
  2881. } else {
  2882. BEGIN_LP_RING(2);
  2883. if (IS_I965G(dev)) {
  2884. OUT_RING(MI_BATCH_BUFFER_START |
  2885. (2 << 6) |
  2886. MI_BATCH_NON_SECURE_I965);
  2887. OUT_RING(exec_start);
  2888. } else {
  2889. OUT_RING(MI_BATCH_BUFFER_START |
  2890. (2 << 6));
  2891. OUT_RING(exec_start | MI_BATCH_NON_SECURE);
  2892. }
  2893. ADVANCE_LP_RING();
  2894. }
  2895. }
  2896. /* XXX breadcrumb */
  2897. return 0;
  2898. }
  2899. /* Throttle our rendering by waiting until the ring has completed our requests
  2900. * emitted over 20 msec ago.
  2901. *
  2902. * Note that if we were to use the current jiffies each time around the loop,
  2903. * we wouldn't escape the function with any frames outstanding if the time to
  2904. * render a frame was over 20ms.
  2905. *
  2906. * This should get us reasonable parallelism between CPU and GPU but also
  2907. * relatively low latency when blocking on a particular request to finish.
  2908. */
  2909. static int
  2910. i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
  2911. {
  2912. struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
  2913. int ret = 0;
  2914. unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
  2915. mutex_lock(&dev->struct_mutex);
  2916. while (!list_empty(&i915_file_priv->mm.request_list)) {
  2917. struct drm_i915_gem_request *request;
  2918. request = list_first_entry(&i915_file_priv->mm.request_list,
  2919. struct drm_i915_gem_request,
  2920. client_list);
  2921. if (time_after_eq(request->emitted_jiffies, recent_enough))
  2922. break;
  2923. ret = i915_wait_request(dev, request->seqno);
  2924. if (ret != 0)
  2925. break;
  2926. }
  2927. mutex_unlock(&dev->struct_mutex);
  2928. return ret;
  2929. }
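/* Copy the relocation entries for every buffer in the exec list from user
 * space into a single contiguous kernel allocation, so they can be applied
 * (and later written back) without repeated user-space accesses.
 */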
  2930. static int
  2931. i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
  2932. uint32_t buffer_count,
  2933. struct drm_i915_gem_relocation_entry **relocs)
  2934. {
  2935. uint32_t reloc_count = 0, reloc_index = 0, i;
  2936. int ret;
  2937. *relocs = NULL;
  2938. for (i = 0; i < buffer_count; i++) {
  2939. if (reloc_count + exec_list[i].relocation_count < reloc_count)
  2940. return -EINVAL;
  2941. reloc_count += exec_list[i].relocation_count;
  2942. }
  2943. *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
  2944. if (*relocs == NULL)
  2945. return -ENOMEM;
  2946. for (i = 0; i < buffer_count; i++) {
  2947. struct drm_i915_gem_relocation_entry __user *user_relocs;
  2948. user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
  2949. ret = copy_from_user(&(*relocs)[reloc_index],
  2950. user_relocs,
  2951. exec_list[i].relocation_count *
  2952. sizeof(**relocs));
  2953. if (ret != 0) {
  2954. drm_free_large(*relocs);
  2955. *relocs = NULL;
  2956. return -EFAULT;
  2957. }
  2958. reloc_index += exec_list[i].relocation_count;
  2959. }
  2960. return 0;
  2961. }
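/* Write the updated relocation entries (including their new presumed
 * offsets) back out to user space and free the kernel-side copy.
 */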
  2962. static int
  2963. i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
  2964. uint32_t buffer_count,
  2965. struct drm_i915_gem_relocation_entry *relocs)
  2966. {
  2967. uint32_t reloc_count = 0, i;
  2968. int ret = 0;
  2969. for (i = 0; i < buffer_count; i++) {
  2970. struct drm_i915_gem_relocation_entry __user *user_relocs;
  2971. int unwritten;
  2972. user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
  2973. unwritten = copy_to_user(user_relocs,
  2974. &relocs[reloc_count],
  2975. exec_list[i].relocation_count *
  2976. sizeof(*relocs));
  2977. if (unwritten) {
  2978. ret = -EFAULT;
  2979. goto err;
  2980. }
  2981. reloc_count += exec_list[i].relocation_count;
  2982. }
  2983. err:
  2984. drm_free_large(relocs);
  2985. return ret;
  2986. }
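/* Sanity check the batch buffer range: both the start offset and the length
 * must be 8-byte aligned, and the start must be non-zero.
 */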
static int
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
			  uint64_t exec_offset)
{
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7)
		return -EINVAL;

	if (!exec_start)
		return -EINVAL;

	return 0;
}
  3000. int
  3001. i915_gem_execbuffer(struct drm_device *dev, void *data,
  3002. struct drm_file *file_priv)
  3003. {
  3004. drm_i915_private_t *dev_priv = dev->dev_private;
  3005. struct drm_i915_gem_execbuffer *args = data;
  3006. struct drm_i915_gem_exec_object *exec_list = NULL;
  3007. struct drm_gem_object **object_list = NULL;
  3008. struct drm_gem_object *batch_obj;
  3009. struct drm_i915_gem_object *obj_priv;
  3010. struct drm_clip_rect *cliprects = NULL;
  3011. struct drm_i915_gem_relocation_entry *relocs;
  3012. int ret, ret2, i, pinned = 0;
  3013. uint64_t exec_offset;
  3014. uint32_t seqno, flush_domains, reloc_index;
  3015. int pin_tries;
  3016. #if WATCH_EXEC
  3017. DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
  3018. (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  3019. #endif
  3020. if (args->buffer_count < 1) {
  3021. DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
  3022. return -EINVAL;
  3023. }
  3024. /* Copy in the exec list from userland */
  3025. exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
  3026. object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
  3027. if (exec_list == NULL || object_list == NULL) {
  3028. DRM_ERROR("Failed to allocate exec or object list "
  3029. "for %d buffers\n",
  3030. args->buffer_count);
  3031. ret = -ENOMEM;
  3032. goto pre_mutex_err;
  3033. }
  3034. ret = copy_from_user(exec_list,
  3035. (struct drm_i915_relocation_entry __user *)
  3036. (uintptr_t) args->buffers_ptr,
  3037. sizeof(*exec_list) * args->buffer_count);
  3038. if (ret != 0) {
  3039. DRM_ERROR("copy %d exec entries failed %d\n",
  3040. args->buffer_count, ret);
  3041. goto pre_mutex_err;
  3042. }
  3043. if (args->num_cliprects != 0) {
		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}
  3048. ret = copy_from_user(cliprects,
  3049. (struct drm_clip_rect __user *)
  3050. (uintptr_t) args->cliprects_ptr,
  3051. sizeof(*cliprects) * args->num_cliprects);
  3052. if (ret != 0) {
  3053. DRM_ERROR("copy %d cliprects failed: %d\n",
  3054. args->num_cliprects, ret);
  3055. goto pre_mutex_err;
  3056. }
  3057. }
  3058. ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
  3059. &relocs);
  3060. if (ret != 0)
  3061. goto pre_mutex_err;
  3062. mutex_lock(&dev->struct_mutex);
  3063. i915_verify_inactive(dev, __FILE__, __LINE__);
  3064. if (atomic_read(&dev_priv->mm.wedged)) {
  3065. DRM_ERROR("Execbuf while wedged\n");
  3066. mutex_unlock(&dev->struct_mutex);
  3067. ret = -EIO;
  3068. goto pre_mutex_err;
  3069. }
  3070. if (dev_priv->mm.suspended) {
  3071. DRM_ERROR("Execbuf while VT-switched.\n");
  3072. mutex_unlock(&dev->struct_mutex);
  3073. ret = -EBUSY;
  3074. goto pre_mutex_err;
  3075. }
  3076. /* Look up object handles */
  3077. for (i = 0; i < args->buffer_count; i++) {
  3078. object_list[i] = drm_gem_object_lookup(dev, file_priv,
  3079. exec_list[i].handle);
  3080. if (object_list[i] == NULL) {
  3081. DRM_ERROR("Invalid object handle %d at index %d\n",
  3082. exec_list[i].handle, i);
  3083. ret = -EBADF;
  3084. goto err;
  3085. }
  3086. obj_priv = object_list[i]->driver_private;
  3087. if (obj_priv->in_execbuffer) {
  3088. DRM_ERROR("Object %p appears more than once in object list\n",
  3089. object_list[i]);
  3090. ret = -EBADF;
  3091. goto err;
  3092. }
  3093. obj_priv->in_execbuffer = true;
  3094. }
  3095. /* Pin and relocate */
  3096. for (pin_tries = 0; ; pin_tries++) {
  3097. ret = 0;
  3098. reloc_index = 0;
  3099. for (i = 0; i < args->buffer_count; i++) {
  3100. object_list[i]->pending_read_domains = 0;
  3101. object_list[i]->pending_write_domain = 0;
  3102. ret = i915_gem_object_pin_and_relocate(object_list[i],
  3103. file_priv,
  3104. &exec_list[i],
  3105. &relocs[reloc_index]);
  3106. if (ret)
  3107. break;
  3108. pinned = i + 1;
  3109. reloc_index += exec_list[i].relocation_count;
  3110. }
  3111. /* success */
  3112. if (ret == 0)
  3113. break;
  3114. /* error other than GTT full, or we've already tried again */
  3115. if (ret != -ENOSPC || pin_tries >= 1) {
  3116. if (ret != -ERESTARTSYS) {
  3117. unsigned long long total_size = 0;
  3118. for (i = 0; i < args->buffer_count; i++)
  3119. total_size += object_list[i]->size;
  3120. DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
  3121. pinned+1, args->buffer_count,
  3122. total_size, ret);
  3123. DRM_ERROR("%d objects [%d pinned], "
  3124. "%d object bytes [%d pinned], "
  3125. "%d/%d gtt bytes\n",
  3126. atomic_read(&dev->object_count),
  3127. atomic_read(&dev->pin_count),
  3128. atomic_read(&dev->object_memory),
  3129. atomic_read(&dev->pin_memory),
  3130. atomic_read(&dev->gtt_memory),
  3131. dev->gtt_total);
  3132. }
  3133. goto err;
  3134. }
  3135. /* unpin all of our buffers */
  3136. for (i = 0; i < pinned; i++)
  3137. i915_gem_object_unpin(object_list[i]);
  3138. pinned = 0;
  3139. /* evict everyone we can from the aperture */
  3140. ret = i915_gem_evict_everything(dev);
  3141. if (ret && ret != -ENOSPC)
  3142. goto err;
  3143. }
  3144. /* Set the pending read domains for the batch buffer to COMMAND */
  3145. batch_obj = object_list[args->buffer_count-1];
  3146. if (batch_obj->pending_write_domain) {
  3147. DRM_ERROR("Attempting to use self-modifying batch buffer\n");
  3148. ret = -EINVAL;
  3149. goto err;
  3150. }
  3151. batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
  3152. /* Sanity check the batch buffer, prior to moving objects */
  3153. exec_offset = exec_list[args->buffer_count - 1].offset;
	ret = i915_gem_check_execbuffer(args, exec_offset);
  3155. if (ret != 0) {
  3156. DRM_ERROR("execbuf with invalid offset/length\n");
  3157. goto err;
  3158. }
  3159. i915_verify_inactive(dev, __FILE__, __LINE__);
  3160. /* Zero the global flush/invalidate flags. These
  3161. * will be modified as new domains are computed
  3162. * for each object
  3163. */
  3164. dev->invalidate_domains = 0;
  3165. dev->flush_domains = 0;
  3166. for (i = 0; i < args->buffer_count; i++) {
  3167. struct drm_gem_object *obj = object_list[i];
  3168. /* Compute new gpu domains and update invalidate/flush */
  3169. i915_gem_object_set_to_gpu_domain(obj);
  3170. }
  3171. i915_verify_inactive(dev, __FILE__, __LINE__);
  3172. if (dev->invalidate_domains | dev->flush_domains) {
  3173. #if WATCH_EXEC
  3174. DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
  3175. __func__,
  3176. dev->invalidate_domains,
  3177. dev->flush_domains);
  3178. #endif
  3179. i915_gem_flush(dev,
  3180. dev->invalidate_domains,
  3181. dev->flush_domains);
  3182. if (dev->flush_domains)
  3183. (void)i915_add_request(dev, file_priv,
  3184. dev->flush_domains);
  3185. }
  3186. for (i = 0; i < args->buffer_count; i++) {
  3187. struct drm_gem_object *obj = object_list[i];
  3188. uint32_t old_write_domain = obj->write_domain;
  3189. obj->write_domain = obj->pending_write_domain;
  3190. trace_i915_gem_object_change_domain(obj,
  3191. obj->read_domains,
  3192. old_write_domain);
  3193. }
  3194. i915_verify_inactive(dev, __FILE__, __LINE__);
  3195. #if WATCH_COHERENCY
  3196. for (i = 0; i < args->buffer_count; i++) {
  3197. i915_gem_object_check_coherency(object_list[i],
  3198. exec_list[i].handle);
  3199. }
  3200. #endif
  3201. #if WATCH_EXEC
  3202. i915_gem_dump_object(batch_obj,
  3203. args->batch_len,
  3204. __func__,
  3205. ~0);
  3206. #endif
  3207. /* Exec the batchbuffer */
  3208. ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
  3209. if (ret) {
  3210. DRM_ERROR("dispatch failed %d\n", ret);
  3211. goto err;
  3212. }
  3213. /*
  3214. * Ensure that the commands in the batch buffer are
  3215. * finished before the interrupt fires
  3216. */
  3217. flush_domains = i915_retire_commands(dev);
  3218. i915_verify_inactive(dev, __FILE__, __LINE__);
  3219. /*
  3220. * Get a seqno representing the execution of the current buffer,
  3221. * which we can wait on. We would like to mitigate these interrupts,
  3222. * likely by only creating seqnos occasionally (so that we have
  3223. * *some* interrupts representing completion of buffers that we can
  3224. * wait on when trying to clear up gtt space).
  3225. */
  3226. seqno = i915_add_request(dev, file_priv, flush_domains);
  3227. BUG_ON(seqno == 0);
  3228. for (i = 0; i < args->buffer_count; i++) {
  3229. struct drm_gem_object *obj = object_list[i];
  3230. i915_gem_object_move_to_active(obj, seqno);
  3231. #if WATCH_LRU
  3232. DRM_INFO("%s: move to exec list %p\n", __func__, obj);
  3233. #endif
  3234. }
  3235. #if WATCH_LRU
  3236. i915_dump_lru(dev, __func__);
  3237. #endif
  3238. i915_verify_inactive(dev, __FILE__, __LINE__);
  3239. err:
  3240. for (i = 0; i < pinned; i++)
  3241. i915_gem_object_unpin(object_list[i]);
  3242. for (i = 0; i < args->buffer_count; i++) {
  3243. if (object_list[i]) {
  3244. obj_priv = object_list[i]->driver_private;
  3245. obj_priv->in_execbuffer = false;
  3246. }
  3247. drm_gem_object_unreference(object_list[i]);
  3248. }
  3249. mutex_unlock(&dev->struct_mutex);
  3250. if (!ret) {
  3251. /* Copy the new buffer offsets back to the user's exec list. */
  3252. ret = copy_to_user((struct drm_i915_relocation_entry __user *)
  3253. (uintptr_t) args->buffers_ptr,
  3254. exec_list,
  3255. sizeof(*exec_list) * args->buffer_count);
  3256. if (ret) {
  3257. ret = -EFAULT;
  3258. DRM_ERROR("failed to copy %d exec entries "
  3259. "back to user (%d)\n",
  3260. args->buffer_count, ret);
  3261. }
  3262. }
  3263. /* Copy the updated relocations out regardless of current error
  3264. * state. Failure to update the relocs would mean that the next
  3265. * time userland calls execbuf, it would do so with presumed offset
  3266. * state that didn't match the actual object state.
  3267. */
  3268. ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
  3269. relocs);
  3270. if (ret2 != 0) {
  3271. DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
  3272. if (ret == 0)
  3273. ret = ret2;
  3274. }
  3275. pre_mutex_err:
  3276. drm_free_large(object_list);
  3277. drm_free_large(exec_list);
  3278. kfree(cliprects);
  3279. return ret;
  3280. }
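/* Pin an object into the GTT: bind it if it has no GTT space yet, set up a
 * fence register for tiled surfaces on pre-965 chips, and drop it from the
 * inactive list while it holds a pin count.
 */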
  3281. int
  3282. i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
  3283. {
  3284. struct drm_device *dev = obj->dev;
  3285. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  3286. int ret;
  3287. i915_verify_inactive(dev, __FILE__, __LINE__);
  3288. if (obj_priv->gtt_space == NULL) {
  3289. ret = i915_gem_object_bind_to_gtt(obj, alignment);
  3290. if (ret != 0) {
  3291. if (ret != -EBUSY && ret != -ERESTARTSYS)
  3292. DRM_ERROR("Failure to bind: %d\n", ret);
  3293. return ret;
  3294. }
  3295. }
  3296. /*
  3297. * Pre-965 chips need a fence register set up in order to
  3298. * properly handle tiled surfaces.
  3299. */
  3300. if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
  3301. ret = i915_gem_object_get_fence_reg(obj);
  3302. if (ret != 0) {
  3303. if (ret != -EBUSY && ret != -ERESTARTSYS)
  3304. DRM_ERROR("Failure to install fence: %d\n",
  3305. ret);
  3306. return ret;
  3307. }
  3308. }
  3309. obj_priv->pin_count++;
  3310. /* If the object is not active and not pending a flush,
  3311. * remove it from the inactive list
  3312. */
  3313. if (obj_priv->pin_count == 1) {
  3314. atomic_inc(&dev->pin_count);
  3315. atomic_add(obj->size, &dev->pin_memory);
  3316. if (!obj_priv->active &&
  3317. (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
  3318. !list_empty(&obj_priv->list))
  3319. list_del_init(&obj_priv->list);
  3320. }
  3321. i915_verify_inactive(dev, __FILE__, __LINE__);
  3322. return 0;
  3323. }
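/* Drop one pin count on the object; once fully unpinned and idle it is put
 * back on the inactive list so it becomes evictable again.
 */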
  3324. void
  3325. i915_gem_object_unpin(struct drm_gem_object *obj)
  3326. {
  3327. struct drm_device *dev = obj->dev;
  3328. drm_i915_private_t *dev_priv = dev->dev_private;
  3329. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  3330. i915_verify_inactive(dev, __FILE__, __LINE__);
  3331. obj_priv->pin_count--;
  3332. BUG_ON(obj_priv->pin_count < 0);
  3333. BUG_ON(obj_priv->gtt_space == NULL);
  3334. /* If the object is no longer pinned, and is
  3335. * neither active nor being flushed, then stick it on
  3336. * the inactive list
  3337. */
  3338. if (obj_priv->pin_count == 0) {
  3339. if (!obj_priv->active &&
  3340. (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
  3341. list_move_tail(&obj_priv->list,
  3342. &dev_priv->mm.inactive_list);
  3343. atomic_dec(&dev->pin_count);
  3344. atomic_sub(obj->size, &dev->pin_memory);
  3345. }
  3346. i915_verify_inactive(dev, __FILE__, __LINE__);
  3347. }
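/* Userspace ioctl for pinning an object into the GTT and reporting its GTT
 * offset back to the caller; only one client may hold an object pinned at a
 * time, and discardable (I915_MADV_DONTNEED) objects are rejected.
 */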
  3348. int
  3349. i915_gem_pin_ioctl(struct drm_device *dev, void *data,
  3350. struct drm_file *file_priv)
  3351. {
  3352. struct drm_i915_gem_pin *args = data;
  3353. struct drm_gem_object *obj;
  3354. struct drm_i915_gem_object *obj_priv;
  3355. int ret;
  3356. mutex_lock(&dev->struct_mutex);
  3357. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3358. if (obj == NULL) {
  3359. DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
  3360. args->handle);
  3361. mutex_unlock(&dev->struct_mutex);
  3362. return -EBADF;
  3363. }
  3364. obj_priv = obj->driver_private;
  3365. if (obj_priv->madv == I915_MADV_DONTNEED) {
		DRM_ERROR("Attempting to pin an I915_MADV_DONTNEED buffer\n");
  3367. drm_gem_object_unreference(obj);
  3368. mutex_unlock(&dev->struct_mutex);
  3369. return -EINVAL;
  3370. }
  3371. if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
  3372. DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
  3373. args->handle);
  3374. drm_gem_object_unreference(obj);
  3375. mutex_unlock(&dev->struct_mutex);
  3376. return -EINVAL;
  3377. }
  3378. obj_priv->user_pin_count++;
  3379. obj_priv->pin_filp = file_priv;
  3380. if (obj_priv->user_pin_count == 1) {
  3381. ret = i915_gem_object_pin(obj, args->alignment);
  3382. if (ret != 0) {
  3383. drm_gem_object_unreference(obj);
  3384. mutex_unlock(&dev->struct_mutex);
  3385. return ret;
  3386. }
  3387. }
  3388. /* XXX - flush the CPU caches for pinned objects
  3389. * as the X server doesn't manage domains yet
  3390. */
  3391. i915_gem_object_flush_cpu_write_domain(obj);
  3392. args->offset = obj_priv->gtt_offset;
  3393. drm_gem_object_unreference(obj);
  3394. mutex_unlock(&dev->struct_mutex);
  3395. return 0;
  3396. }
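/* Userspace counterpart to i915_gem_pin_ioctl(): drop the caller's pin on
 * the object once its user pin count reaches zero.
 */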
  3397. int
  3398. i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
  3399. struct drm_file *file_priv)
  3400. {
  3401. struct drm_i915_gem_pin *args = data;
  3402. struct drm_gem_object *obj;
  3403. struct drm_i915_gem_object *obj_priv;
  3404. mutex_lock(&dev->struct_mutex);
  3405. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3406. if (obj == NULL) {
  3407. DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
  3408. args->handle);
  3409. mutex_unlock(&dev->struct_mutex);
  3410. return -EBADF;
  3411. }
  3412. obj_priv = obj->driver_private;
  3413. if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
  3415. args->handle);
  3416. drm_gem_object_unreference(obj);
  3417. mutex_unlock(&dev->struct_mutex);
  3418. return -EINVAL;
  3419. }
  3420. obj_priv->user_pin_count--;
  3421. if (obj_priv->user_pin_count == 0) {
  3422. obj_priv->pin_filp = NULL;
  3423. i915_gem_object_unpin(obj);
  3424. }
  3425. drm_gem_object_unreference(obj);
  3426. mutex_unlock(&dev->struct_mutex);
  3427. return 0;
  3428. }
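/* Report whether an object is still being used by the GPU, retiring
 * completed requests first so the answer reflects the hardware's actual
 * progress rather than the last interrupt.
 */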
  3429. int
  3430. i915_gem_busy_ioctl(struct drm_device *dev, void *data,
  3431. struct drm_file *file_priv)
  3432. {
  3433. struct drm_i915_gem_busy *args = data;
  3434. struct drm_gem_object *obj;
  3435. struct drm_i915_gem_object *obj_priv;
  3436. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3437. if (obj == NULL) {
  3438. DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
  3439. args->handle);
  3440. return -EBADF;
  3441. }
  3442. mutex_lock(&dev->struct_mutex);
  3443. /* Update the active list for the hardware's current position.
  3444. * Otherwise this only updates on a delayed timer or when irqs are
  3445. * actually unmasked, and our working set ends up being larger than
  3446. * required.
  3447. */
  3448. i915_gem_retire_requests(dev);
  3449. obj_priv = obj->driver_private;
  3450. /* Don't count being on the flushing list against the object being
  3451. * done. Otherwise, a buffer left on the flushing list but not getting
  3452. * flushed (because nobody's flushing that domain) won't ever return
  3453. * unbusy and get reused by libdrm's bo cache. The other expected
  3454. * consumer of this interface, OpenGL's occlusion queries, also specs
  3455. * that the objects get unbusy "eventually" without any interference.
  3456. */
  3457. args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
  3458. drm_gem_object_unreference(obj);
  3459. mutex_unlock(&dev->struct_mutex);
  3460. return 0;
  3461. }
  3462. int
  3463. i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
  3464. struct drm_file *file_priv)
  3465. {
  3466. return i915_gem_ring_throttle(dev, file_priv);
  3467. }
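/* Let userspace mark an object's backing pages as needed or discardable
 * (I915_MADV_WILLNEED / I915_MADV_DONTNEED). Pinned objects are rejected,
 * and whether the object still has a GTT binding is reported via 'retained'.
 */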
  3468. int
  3469. i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
  3470. struct drm_file *file_priv)
  3471. {
  3472. struct drm_i915_gem_madvise *args = data;
  3473. struct drm_gem_object *obj;
  3474. struct drm_i915_gem_object *obj_priv;
  3475. switch (args->madv) {
  3476. case I915_MADV_DONTNEED:
  3477. case I915_MADV_WILLNEED:
  3478. break;
  3479. default:
  3480. return -EINVAL;
  3481. }
  3482. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3483. if (obj == NULL) {
  3484. DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
  3485. args->handle);
  3486. return -EBADF;
  3487. }
  3488. mutex_lock(&dev->struct_mutex);
  3489. obj_priv = obj->driver_private;
  3490. if (obj_priv->pin_count) {
  3491. drm_gem_object_unreference(obj);
  3492. mutex_unlock(&dev->struct_mutex);
  3493. DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
  3494. return -EINVAL;
  3495. }
  3496. obj_priv->madv = args->madv;
  3497. args->retained = obj_priv->gtt_space != NULL;
  3498. drm_gem_object_unreference(obj);
  3499. mutex_unlock(&dev->struct_mutex);
  3500. return 0;
  3501. }
  3502. int i915_gem_init_object(struct drm_gem_object *obj)
  3503. {
  3504. struct drm_i915_gem_object *obj_priv;
  3505. obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
  3506. if (obj_priv == NULL)
  3507. return -ENOMEM;
  3508. /*
  3509. * We've just allocated pages from the kernel,
  3510. * so they've just been written by the CPU with
  3511. * zeros. They'll need to be clflushed before we
  3512. * use them with the GPU.
  3513. */
  3514. obj->write_domain = I915_GEM_DOMAIN_CPU;
  3515. obj->read_domains = I915_GEM_DOMAIN_CPU;
  3516. obj_priv->agp_type = AGP_USER_MEMORY;
  3517. obj->driver_private = obj_priv;
  3518. obj_priv->obj = obj;
  3519. obj_priv->fence_reg = I915_FENCE_REG_NONE;
  3520. INIT_LIST_HEAD(&obj_priv->list);
  3521. INIT_LIST_HEAD(&obj_priv->fence_list);
  3522. obj_priv->madv = I915_MADV_WILLNEED;
  3523. trace_i915_gem_object_create(obj);
  3524. return 0;
  3525. }
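/* Release everything attached to a GEM object as it is destroyed: pins,
 * any physically contiguous backing, the GTT binding, the mmap offset and
 * the per-page CPU-domain tracking state.
 */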
  3526. void i915_gem_free_object(struct drm_gem_object *obj)
  3527. {
  3528. struct drm_device *dev = obj->dev;
  3529. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  3530. trace_i915_gem_object_destroy(obj);
  3531. while (obj_priv->pin_count > 0)
  3532. i915_gem_object_unpin(obj);
  3533. if (obj_priv->phys_obj)
  3534. i915_gem_detach_phys_object(dev, obj);
  3535. i915_gem_object_unbind(obj);
  3536. if (obj_priv->mmap_offset)
  3537. i915_gem_free_mmap_offset(obj);
  3538. kfree(obj_priv->page_cpu_valid);
  3539. kfree(obj_priv->bit_17);
  3540. kfree(obj->driver_private);
  3541. }
  3542. /** Unbinds all inactive objects. */
  3543. static int
  3544. i915_gem_evict_from_inactive_list(struct drm_device *dev)
  3545. {
  3546. drm_i915_private_t *dev_priv = dev->dev_private;
  3547. while (!list_empty(&dev_priv->mm.inactive_list)) {
  3548. struct drm_gem_object *obj;
  3549. int ret;
  3550. obj = list_first_entry(&dev_priv->mm.inactive_list,
  3551. struct drm_i915_gem_object,
  3552. list)->obj;
  3553. ret = i915_gem_object_unbind(obj);
  3554. if (ret != 0) {
  3555. DRM_ERROR("Error unbinding object: %d\n", ret);
  3556. return ret;
  3557. }
  3558. }
  3559. return 0;
  3560. }
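/* Quiesce the GPU: stop execbuffer submission, flush outstanding rendering,
 * wait for the hardware to catch up (flagging it as wedged if it never
 * does), move everything onto the inactive list, evict it from the GTT, and
 * tear down the ring buffer.
 */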
  3561. int
  3562. i915_gem_idle(struct drm_device *dev)
  3563. {
  3564. drm_i915_private_t *dev_priv = dev->dev_private;
  3565. uint32_t seqno, cur_seqno, last_seqno;
  3566. int stuck, ret;
  3567. mutex_lock(&dev->struct_mutex);
  3568. if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
  3569. mutex_unlock(&dev->struct_mutex);
  3570. return 0;
  3571. }
  3572. /* Hack! Don't let anybody do execbuf while we don't control the chip.
  3573. * We need to replace this with a semaphore, or something.
  3574. */
  3575. dev_priv->mm.suspended = 1;
  3576. del_timer(&dev_priv->hangcheck_timer);
  3577. /* Cancel the retire work handler, wait for it to finish if running
  3578. */
  3579. mutex_unlock(&dev->struct_mutex);
  3580. cancel_delayed_work_sync(&dev_priv->mm.retire_work);
  3581. mutex_lock(&dev->struct_mutex);
  3582. i915_kernel_lost_context(dev);
  3583. /* Flush the GPU along with all non-CPU write domains
  3584. */
  3585. i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
  3586. seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
  3587. if (seqno == 0) {
  3588. mutex_unlock(&dev->struct_mutex);
  3589. return -ENOMEM;
  3590. }
  3591. dev_priv->mm.waiting_gem_seqno = seqno;
  3592. last_seqno = 0;
  3593. stuck = 0;
  3594. for (;;) {
  3595. cur_seqno = i915_get_gem_seqno(dev);
  3596. if (i915_seqno_passed(cur_seqno, seqno))
  3597. break;
  3598. if (last_seqno == cur_seqno) {
  3599. if (stuck++ > 100) {
  3600. DRM_ERROR("hardware wedged\n");
  3601. atomic_set(&dev_priv->mm.wedged, 1);
  3602. DRM_WAKEUP(&dev_priv->irq_queue);
  3603. break;
  3604. }
  3605. }
  3606. msleep(10);
  3607. last_seqno = cur_seqno;
  3608. }
  3609. dev_priv->mm.waiting_gem_seqno = 0;
  3610. i915_gem_retire_requests(dev);
  3611. spin_lock(&dev_priv->mm.active_list_lock);
  3612. if (!atomic_read(&dev_priv->mm.wedged)) {
  3613. /* Active and flushing should now be empty as we've
  3614. * waited for a sequence higher than any pending execbuffer
  3615. */
  3616. WARN_ON(!list_empty(&dev_priv->mm.active_list));
  3617. WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
  3618. /* Request should now be empty as we've also waited
  3619. * for the last request in the list
  3620. */
  3621. WARN_ON(!list_empty(&dev_priv->mm.request_list));
  3622. }
  3623. /* Empty the active and flushing lists to inactive. If there's
  3624. * anything left at this point, it means that we're wedged and
  3625. * nothing good's going to happen by leaving them there. So strip
  3626. * the GPU domains and just stuff them onto inactive.
  3627. */
  3628. while (!list_empty(&dev_priv->mm.active_list)) {
  3629. struct drm_gem_object *obj;
  3630. uint32_t old_write_domain;
  3631. obj = list_first_entry(&dev_priv->mm.active_list,
  3632. struct drm_i915_gem_object,
  3633. list)->obj;
  3634. old_write_domain = obj->write_domain;
  3635. obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
  3636. i915_gem_object_move_to_inactive(obj);
  3637. trace_i915_gem_object_change_domain(obj,
  3638. obj->read_domains,
  3639. old_write_domain);
  3640. }
  3641. spin_unlock(&dev_priv->mm.active_list_lock);
  3642. while (!list_empty(&dev_priv->mm.flushing_list)) {
  3643. struct drm_gem_object *obj;
  3644. uint32_t old_write_domain;
  3645. obj = list_first_entry(&dev_priv->mm.flushing_list,
  3646. struct drm_i915_gem_object,
  3647. list)->obj;
  3648. old_write_domain = obj->write_domain;
  3649. obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
  3650. i915_gem_object_move_to_inactive(obj);
  3651. trace_i915_gem_object_change_domain(obj,
  3652. obj->read_domains,
  3653. old_write_domain);
  3654. }
  3655. /* Move all inactive buffers out of the GTT. */
  3656. ret = i915_gem_evict_from_inactive_list(dev);
  3657. WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
  3658. if (ret) {
  3659. mutex_unlock(&dev->struct_mutex);
  3660. return ret;
  3661. }
  3662. i915_gem_cleanup_ringbuffer(dev);
  3663. mutex_unlock(&dev->struct_mutex);
  3664. return 0;
  3665. }
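/* Allocate, pin and map a GEM object to serve as the hardware status page
 * on chips that need it located in graphics memory (I915_NEED_GFX_HWS).
 */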
  3666. static int
  3667. i915_gem_init_hws(struct drm_device *dev)
  3668. {
  3669. drm_i915_private_t *dev_priv = dev->dev_private;
  3670. struct drm_gem_object *obj;
  3671. struct drm_i915_gem_object *obj_priv;
  3672. int ret;
  3673. /* If we need a physical address for the status page, it's already
  3674. * initialized at driver load time.
  3675. */
  3676. if (!I915_NEED_GFX_HWS(dev))
  3677. return 0;
  3678. obj = drm_gem_object_alloc(dev, 4096);
  3679. if (obj == NULL) {
  3680. DRM_ERROR("Failed to allocate status page\n");
  3681. return -ENOMEM;
  3682. }
  3683. obj_priv = obj->driver_private;
  3684. obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
  3685. ret = i915_gem_object_pin(obj, 4096);
  3686. if (ret != 0) {
  3687. drm_gem_object_unreference(obj);
  3688. return ret;
  3689. }
  3690. dev_priv->status_gfx_addr = obj_priv->gtt_offset;
  3691. dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
  3692. if (dev_priv->hw_status_page == NULL) {
  3693. DRM_ERROR("Failed to map status page.\n");
  3694. memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
  3695. i915_gem_object_unpin(obj);
  3696. drm_gem_object_unreference(obj);
  3697. return -EINVAL;
  3698. }
  3699. dev_priv->hws_obj = obj;
  3700. memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
  3701. I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
  3702. I915_READ(HWS_PGA); /* posting read */
  3703. DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
  3704. return 0;
  3705. }
  3706. static void
  3707. i915_gem_cleanup_hws(struct drm_device *dev)
  3708. {
  3709. drm_i915_private_t *dev_priv = dev->dev_private;
  3710. struct drm_gem_object *obj;
  3711. struct drm_i915_gem_object *obj_priv;
  3712. if (dev_priv->hws_obj == NULL)
  3713. return;
  3714. obj = dev_priv->hws_obj;
  3715. obj_priv = obj->driver_private;
  3716. kunmap(obj_priv->pages[0]);
  3717. i915_gem_object_unpin(obj);
  3718. drm_gem_object_unreference(obj);
  3719. dev_priv->hws_obj = NULL;
  3720. memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
  3721. dev_priv->hw_status_page = NULL;
  3722. /* Write high address into HWS_PGA when disabling. */
  3723. I915_WRITE(HWS_PGA, 0x1ffff000);
  3724. }
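/* Allocate and pin a GEM object for the ring buffer, map it write-combined
 * through the GTT, and program the ring registers, including a workaround
 * for G45 parts whose head pointer fails to reset to zero.
 */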
  3725. int
  3726. i915_gem_init_ringbuffer(struct drm_device *dev)
  3727. {
  3728. drm_i915_private_t *dev_priv = dev->dev_private;
  3729. struct drm_gem_object *obj;
  3730. struct drm_i915_gem_object *obj_priv;
  3731. drm_i915_ring_buffer_t *ring = &dev_priv->ring;
  3732. int ret;
  3733. u32 head;
  3734. ret = i915_gem_init_hws(dev);
  3735. if (ret != 0)
  3736. return ret;
  3737. obj = drm_gem_object_alloc(dev, 128 * 1024);
  3738. if (obj == NULL) {
  3739. DRM_ERROR("Failed to allocate ringbuffer\n");
  3740. i915_gem_cleanup_hws(dev);
  3741. return -ENOMEM;
  3742. }
  3743. obj_priv = obj->driver_private;
  3744. ret = i915_gem_object_pin(obj, 4096);
  3745. if (ret != 0) {
  3746. drm_gem_object_unreference(obj);
  3747. i915_gem_cleanup_hws(dev);
  3748. return ret;
  3749. }
  3750. /* Set up the kernel mapping for the ring. */
  3751. ring->Size = obj->size;
  3752. ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
  3753. ring->map.size = obj->size;
  3754. ring->map.type = 0;
  3755. ring->map.flags = 0;
  3756. ring->map.mtrr = 0;
  3757. drm_core_ioremap_wc(&ring->map, dev);
  3758. if (ring->map.handle == NULL) {
  3759. DRM_ERROR("Failed to map ringbuffer.\n");
  3760. memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
  3761. i915_gem_object_unpin(obj);
  3762. drm_gem_object_unreference(obj);
  3763. i915_gem_cleanup_hws(dev);
  3764. return -EINVAL;
  3765. }
  3766. ring->ring_obj = obj;
  3767. ring->virtual_start = ring->map.handle;
  3768. /* Stop the ring if it's running. */
  3769. I915_WRITE(PRB0_CTL, 0);
  3770. I915_WRITE(PRB0_TAIL, 0);
  3771. I915_WRITE(PRB0_HEAD, 0);
  3772. /* Initialize the ring. */
  3773. I915_WRITE(PRB0_START, obj_priv->gtt_offset);
  3774. head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
  3775. /* G45 ring initialization fails to reset head to zero */
  3776. if (head != 0) {
  3777. DRM_ERROR("Ring head not reset to zero "
  3778. "ctl %08x head %08x tail %08x start %08x\n",
  3779. I915_READ(PRB0_CTL),
  3780. I915_READ(PRB0_HEAD),
  3781. I915_READ(PRB0_TAIL),
  3782. I915_READ(PRB0_START));
  3783. I915_WRITE(PRB0_HEAD, 0);
  3784. DRM_ERROR("Ring head forced to zero "
  3785. "ctl %08x head %08x tail %08x start %08x\n",
  3786. I915_READ(PRB0_CTL),
  3787. I915_READ(PRB0_HEAD),
  3788. I915_READ(PRB0_TAIL),
  3789. I915_READ(PRB0_START));
  3790. }
  3791. I915_WRITE(PRB0_CTL,
  3792. ((obj->size - 4096) & RING_NR_PAGES) |
  3793. RING_NO_REPORT |
  3794. RING_VALID);
  3795. head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
  3796. /* If the head is still not zero, the ring is dead */
  3797. if (head != 0) {
  3798. DRM_ERROR("Ring initialization failed "
  3799. "ctl %08x head %08x tail %08x start %08x\n",
  3800. I915_READ(PRB0_CTL),
  3801. I915_READ(PRB0_HEAD),
  3802. I915_READ(PRB0_TAIL),
  3803. I915_READ(PRB0_START));
  3804. return -EIO;
  3805. }
  3806. /* Update our cache of the ring state */
  3807. if (!drm_core_check_feature(dev, DRIVER_MODESET))
  3808. i915_kernel_lost_context(dev);
  3809. else {
  3810. ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
  3811. ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
  3812. ring->space = ring->head - (ring->tail + 8);
  3813. if (ring->space < 0)
  3814. ring->space += ring->Size;
  3815. }
  3816. return 0;
  3817. }
  3818. void
  3819. i915_gem_cleanup_ringbuffer(struct drm_device *dev)
  3820. {
  3821. drm_i915_private_t *dev_priv = dev->dev_private;
  3822. if (dev_priv->ring.ring_obj == NULL)
  3823. return;
  3824. drm_core_ioremapfree(&dev_priv->ring.map, dev);
  3825. i915_gem_object_unpin(dev_priv->ring.ring_obj);
  3826. drm_gem_object_unreference(dev_priv->ring.ring_obj);
  3827. dev_priv->ring.ring_obj = NULL;
  3828. memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
  3829. i915_gem_cleanup_hws(dev);
  3830. }
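/* Re-enable GEM when userspace takes the VT back: clear the wedged flag if
 * necessary, reinitialize the ring buffer and install the IRQ handler.
 */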
  3831. int
  3832. i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
  3833. struct drm_file *file_priv)
  3834. {
  3835. drm_i915_private_t *dev_priv = dev->dev_private;
  3836. int ret;
  3837. if (drm_core_check_feature(dev, DRIVER_MODESET))
  3838. return 0;
  3839. if (atomic_read(&dev_priv->mm.wedged)) {
  3840. DRM_ERROR("Reenabling wedged hardware, good luck\n");
  3841. atomic_set(&dev_priv->mm.wedged, 0);
  3842. }
  3843. mutex_lock(&dev->struct_mutex);
  3844. dev_priv->mm.suspended = 0;
  3845. ret = i915_gem_init_ringbuffer(dev);
  3846. if (ret != 0) {
  3847. mutex_unlock(&dev->struct_mutex);
  3848. return ret;
  3849. }
  3850. spin_lock(&dev_priv->mm.active_list_lock);
  3851. BUG_ON(!list_empty(&dev_priv->mm.active_list));
  3852. spin_unlock(&dev_priv->mm.active_list_lock);
  3853. BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
  3854. BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
  3855. BUG_ON(!list_empty(&dev_priv->mm.request_list));
  3856. mutex_unlock(&dev->struct_mutex);
  3857. drm_irq_install(dev);
  3858. return 0;
  3859. }
  3860. int
  3861. i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
  3862. struct drm_file *file_priv)
  3863. {
  3864. int ret;
  3865. if (drm_core_check_feature(dev, DRIVER_MODESET))
  3866. return 0;
  3867. ret = i915_gem_idle(dev);
  3868. drm_irq_uninstall(dev);
  3869. return ret;
  3870. }
  3871. void
  3872. i915_gem_lastclose(struct drm_device *dev)
  3873. {
  3874. int ret;
  3875. if (drm_core_check_feature(dev, DRIVER_MODESET))
  3876. return;
  3877. ret = i915_gem_idle(dev);
  3878. if (ret)
  3879. DRM_ERROR("failed to idle hardware: %d\n", ret);
  3880. }
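/* One-time per-device GEM initialization: set up the memory-manager lists,
 * the retire work handler, the shrinker list entry, and clear the fence
 * registers before detecting the bit-6 swizzling mode.
 */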
  3881. void
  3882. i915_gem_load(struct drm_device *dev)
  3883. {
  3884. int i;
  3885. drm_i915_private_t *dev_priv = dev->dev_private;
  3886. spin_lock_init(&dev_priv->mm.active_list_lock);
  3887. INIT_LIST_HEAD(&dev_priv->mm.active_list);
  3888. INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
  3889. INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
  3890. INIT_LIST_HEAD(&dev_priv->mm.request_list);
  3891. INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  3892. INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
  3893. i915_gem_retire_work_handler);
  3894. dev_priv->mm.next_gem_seqno = 1;
  3895. spin_lock(&shrink_list_lock);
  3896. list_add(&dev_priv->mm.shrink_list, &shrink_list);
  3897. spin_unlock(&shrink_list_lock);
  3898. /* Old X drivers will take 0-2 for front, back, depth buffers */
  3899. dev_priv->fence_reg_start = 3;
  3900. if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
  3901. dev_priv->num_fence_regs = 16;
  3902. else
  3903. dev_priv->num_fence_regs = 8;
  3904. /* Initialize fence registers to zero */
  3905. if (IS_I965G(dev)) {
  3906. for (i = 0; i < 16; i++)
  3907. I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
  3908. } else {
  3909. for (i = 0; i < 8; i++)
  3910. I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
  3911. if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
  3912. for (i = 0; i < 8; i++)
  3913. I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
  3914. }
  3915. i915_gem_detect_bit_6_swizzle(dev);
  3916. }
  3917. /*
  3918. * Create a physically contiguous memory object for this object
  3919. * e.g. for cursor + overlay regs
  3920. */
  3921. int i915_gem_init_phys_object(struct drm_device *dev,
  3922. int id, int size)
  3923. {
  3924. drm_i915_private_t *dev_priv = dev->dev_private;
  3925. struct drm_i915_gem_phys_object *phys_obj;
  3926. int ret;
  3927. if (dev_priv->mm.phys_objs[id - 1] || !size)
  3928. return 0;
  3929. phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
  3930. if (!phys_obj)
  3931. return -ENOMEM;
  3932. phys_obj->id = id;
  3933. phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
  3934. if (!phys_obj->handle) {
  3935. ret = -ENOMEM;
  3936. goto kfree_obj;
  3937. }
  3938. #ifdef CONFIG_X86
  3939. set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  3940. #endif
  3941. dev_priv->mm.phys_objs[id - 1] = phys_obj;
  3942. return 0;
  3943. kfree_obj:
  3944. kfree(phys_obj);
  3945. return ret;
  3946. }
  3947. void i915_gem_free_phys_object(struct drm_device *dev, int id)
  3948. {
  3949. drm_i915_private_t *dev_priv = dev->dev_private;
  3950. struct drm_i915_gem_phys_object *phys_obj;
  3951. if (!dev_priv->mm.phys_objs[id - 1])
  3952. return;
  3953. phys_obj = dev_priv->mm.phys_objs[id - 1];
  3954. if (phys_obj->cur_obj) {
  3955. i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
  3956. }
  3957. #ifdef CONFIG_X86
  3958. set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  3959. #endif
  3960. drm_pci_free(dev, phys_obj->handle);
  3961. kfree(phys_obj);
  3962. dev_priv->mm.phys_objs[id - 1] = NULL;
  3963. }
  3964. void i915_gem_free_all_phys_object(struct drm_device *dev)
  3965. {
  3966. int i;
  3967. for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
  3968. i915_gem_free_phys_object(dev, i);
  3969. }
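/* Copy the contents of the physically contiguous backing store back into
 * the object's shmem pages, flush the caches, and drop the association with
 * the phys object.
 */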
  3970. void i915_gem_detach_phys_object(struct drm_device *dev,
  3971. struct drm_gem_object *obj)
  3972. {
  3973. struct drm_i915_gem_object *obj_priv;
  3974. int i;
  3975. int ret;
  3976. int page_count;
  3977. obj_priv = obj->driver_private;
  3978. if (!obj_priv->phys_obj)
  3979. return;
  3980. ret = i915_gem_object_get_pages(obj);
  3981. if (ret)
  3982. goto out;
  3983. page_count = obj->size / PAGE_SIZE;
  3984. for (i = 0; i < page_count; i++) {
  3985. char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
  3986. char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  3987. memcpy(dst, src, PAGE_SIZE);
  3988. kunmap_atomic(dst, KM_USER0);
  3989. }
  3990. drm_clflush_pages(obj_priv->pages, page_count);
  3991. drm_agp_chipset_flush(dev);
  3992. i915_gem_object_put_pages(obj);
  3993. out:
  3994. obj_priv->phys_obj->cur_obj = NULL;
  3995. obj_priv->phys_obj = NULL;
  3996. }
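/* Back an object with a physically contiguous allocation (creating one of
 * the right size if needed) and copy its current page contents into it.
 */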
  3997. int
  3998. i915_gem_attach_phys_object(struct drm_device *dev,
  3999. struct drm_gem_object *obj, int id)
  4000. {
  4001. drm_i915_private_t *dev_priv = dev->dev_private;
  4002. struct drm_i915_gem_object *obj_priv;
  4003. int ret = 0;
  4004. int page_count;
  4005. int i;
  4006. if (id > I915_MAX_PHYS_OBJECT)
  4007. return -EINVAL;
  4008. obj_priv = obj->driver_private;
  4009. if (obj_priv->phys_obj) {
  4010. if (obj_priv->phys_obj->id == id)
  4011. return 0;
  4012. i915_gem_detach_phys_object(dev, obj);
  4013. }
  4014. /* create a new object */
  4015. if (!dev_priv->mm.phys_objs[id - 1]) {
  4016. ret = i915_gem_init_phys_object(dev, id,
  4017. obj->size);
  4018. if (ret) {
  4019. DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
  4020. goto out;
  4021. }
  4022. }
  4023. /* bind to the object */
  4024. obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
  4025. obj_priv->phys_obj->cur_obj = obj;
  4026. ret = i915_gem_object_get_pages(obj);
  4027. if (ret) {
  4028. DRM_ERROR("failed to get page list\n");
  4029. goto out;
  4030. }
  4031. page_count = obj->size / PAGE_SIZE;
  4032. for (i = 0; i < page_count; i++) {
  4033. char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
  4034. char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  4035. memcpy(dst, src, PAGE_SIZE);
  4036. kunmap_atomic(src, KM_USER0);
  4037. }
  4038. i915_gem_object_put_pages(obj);
  4039. return 0;
  4040. out:
  4041. return ret;
  4042. }
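/* pwrite fast path for objects backed by a physically contiguous
 * allocation: copy straight from user space into the phys backing and flush
 * the chipset.
 */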
  4043. static int
  4044. i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
  4045. struct drm_i915_gem_pwrite *args,
  4046. struct drm_file *file_priv)
  4047. {
  4048. struct drm_i915_gem_object *obj_priv = obj->driver_private;
  4049. void *obj_addr;
  4050. int ret;
  4051. char __user *user_data;
  4052. user_data = (char __user *) (uintptr_t) args->data_ptr;
  4053. obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
  4054. DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
  4055. ret = copy_from_user(obj_addr, user_data, args->size);
  4056. if (ret)
  4057. return -EFAULT;
  4058. drm_agp_chipset_flush(dev);
  4059. return 0;
  4060. }

void i915_gem_release(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list))
		list_del_init(i915_file_priv->mm.request_list.next);
	mutex_unlock(&dev->struct_mutex);
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
	struct inode *inode;

	inode = obj->filp->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	truncate_inode_pages(inode->i_mapping, 0);
	mutex_unlock(&inode->i_mutex);
}
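
/*
 * Memory-pressure callback registered with the VM shrinker below.  With
 * nr_to_scan == 0 it only reports how many inactive objects are available
 * (scaled by sysctl_vfs_cache_pressure, as the shrinker interface expects).
 * Otherwise it makes two passes over every device on shrink_list: first
 * unbinding and truncating purgeable inactive objects, then evicting
 * whatever is still on the inactive list.  Returning -1 tells the VM that
 * nothing could be done without risking deadlock (no struct_mutex could be
 * taken).
 */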
static int
i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				struct drm_gem_object *obj = obj_priv->obj;

				i915_gem_object_unbind(obj);
				i915_gem_object_truncate(obj);

				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (nr_to_scan > 0) {
				struct drm_gem_object *obj = obj_priv->obj;

				i915_gem_object_unbind(obj);
				if (i915_gem_object_is_purgeable(obj_priv))
					i915_gem_object_truncate(obj);

				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;
	else if (cnt > 0)
		return (cnt / 100) * sysctl_vfs_cache_pressure;
	else
		return 0;
}

static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};
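
/*
 * Hook the shrink callback above into the VM.  These init/exit helpers are
 * expected to be called from the driver's module load/unload paths (outside
 * this file).
 */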
__init void
i915_gem_shrinker_init(void)
{
	register_shrinker(&shrinker);
}

__exit void
i915_gem_shrinker_exit(void)
{
	unregister_shrinker(&shrinker);
}