i915_gem.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
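
/* An object is "inactive" when it is bound into the GTT but is neither in
 * use by the GPU nor pinned, i.e. it is eligible for the inactive LRU and
 * for eviction.
 */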
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->gtt_space &&
		!obj_priv->active &&
		obj_priv->pin_count == 0;
}
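
/* Set up the drm_mm range manager that hands out GTT space to objects.
 * Both ends of the managed range must be page aligned.
 */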
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
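
/* Copy up to one page from the object's shmem backing pages to user space
 * through an atomic kmap. Returns -EFAULT if the user page would fault, so
 * the caller can fall back to the sleeping slow path.
 */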
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}
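
/* Objects that are tiled on machines which swizzle addresses based on
 * physical bit 17 need the manual bit-17 copy routines below for CPU
 * access through pread/pwrite.
 */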
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}
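
/* Plain page-to-page copy via kmap(); used by the slow paths, where
 * sleeping is allowed.
 */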
static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}
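
/* Like slow_shmem_copy(), but compensates for bit-17 swizzling of the GPU
 * page by flipping bit 6 of the offset while copying, one cacheline at a
 * time.
 */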
static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}
/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
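
/* Try to acquire the object's backing pages with lightweight allocation
 * flags; if that fails with -ENOMEM, evict something from the GTT to free
 * memory and retry with the default flags.
 */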
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;

		ret = i915_gem_evict_something(dev, obj->size,
					       i915_gem_get_gtt_alignment(obj));
		if (ret)
			return ret;

		ret = i915_gem_object_get_pages(obj, 0);
	}

	return ret;
}
/**
 * This is the fallback shmem pread path, which pins the user pages with
 * get_user_pages() so that we can copy out of the object's backing pages
 * while holding the struct_mutex without taking page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */
static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}
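
/* Write-direction counterpart of fast_shmem_read(): copy user data into
 * the object's shmem page through an atomic kmap, returning -EFAULT on a
 * fault so the caller can fall back to the slow path.
 */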
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0 &&
		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	mutex_lock(&dev->struct_mutex);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg =
				&dev_priv->fence_regs[obj_priv->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	/* Maintain LRU order of "inactive" objects */
	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = to_intel_bo(obj);

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret)
			goto unlock;
	}

	if (i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);
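
	/* Translate the errors from the binding/remapping above into the
	 * VM_FAULT_* codes expected by the core fault handling code.
	 */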
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		ret = -ENOMEM;
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}
  1080. /**
  1081. * i915_gem_release_mmap - remove physical page mappings
  1082. * @obj: obj in question
  1083. *
  1084. * Preserve the reservation of the mmapping with the DRM core code, but
  1085. * relinquish ownership of the pages back to the system.
  1086. *
  1087. * It is vital that we remove the page mapping if we have mapped a tiled
  1088. * object through the GTT and then lose the fence register due to
  1089. * resource pressure. Similarly if the object has been moved out of the
1090. * aperture, then pages mapped into userspace must be revoked. Removing the
  1091. * mapping will then trigger a page fault on the next user access, allowing
  1092. * fixup by i915_gem_fault().
  1093. */
  1094. void
  1095. i915_gem_release_mmap(struct drm_gem_object *obj)
  1096. {
  1097. struct drm_device *dev = obj->dev;
  1098. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1099. if (dev->dev_mapping)
  1100. unmap_mapping_range(dev->dev_mapping,
  1101. obj_priv->mmap_offset, obj->size, 1);
  1102. }
  1103. static void
  1104. i915_gem_free_mmap_offset(struct drm_gem_object *obj)
  1105. {
  1106. struct drm_device *dev = obj->dev;
  1107. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1108. struct drm_gem_mm *mm = dev->mm_private;
  1109. struct drm_map_list *list;
  1110. list = &obj->map_list;
  1111. drm_ht_remove_item(&mm->offset_hash, &list->hash);
  1112. if (list->file_offset_node) {
  1113. drm_mm_put_block(list->file_offset_node);
  1114. list->file_offset_node = NULL;
  1115. }
  1116. if (list->map) {
  1117. kfree(list->map);
  1118. list->map = NULL;
  1119. }
  1120. obj_priv->mmap_offset = 0;
  1121. }
  1122. /**
  1123. * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  1124. * @obj: object to check
  1125. *
  1126. * Return the required GTT alignment for an object, taking into account
  1127. * potential fence register mapping if needed.
  1128. */
  1129. static uint32_t
  1130. i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
  1131. {
  1132. struct drm_device *dev = obj->dev;
  1133. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1134. int start, i;
  1135. /*
  1136. * Minimum alignment is 4k (GTT page size), but might be greater
  1137. * if a fence register is needed for the object.
  1138. */
  1139. if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
  1140. return 4096;
  1141. /*
  1142. * Previous chips need to be aligned to the size of the smallest
  1143. * fence register that can contain the object.
  1144. */
  1145. if (IS_I9XX(dev))
  1146. start = 1024*1024;
  1147. else
  1148. start = 512*1024;
  1149. for (i = start; i < obj->size; i <<= 1)
  1150. ;
  1151. return i;
  1152. }
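/*
 * Worked example of the rounding loop above (illustrative sizes): for a
 * 600KB tiled object on an 8xx-class chip the loop starts at 512KB, doubles
 * once to 1MB (512KB < 600KB) and then stops (1MB >= 600KB), so the object
 * must be bound at a 1MB-aligned offset.  On 9xx-class hardware the same
 * object starts at the 1MB minimum and the loop never iterates, again
 * giving 1MB.
 */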
  1153. /**
  1154. * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  1155. * @dev: DRM device
  1156. * @data: GTT mapping ioctl data
  1157. * @file_priv: GEM object info
  1158. *
  1159. * Simply returns the fake offset to userspace so it can mmap it.
  1160. * The mmap call will end up in drm_gem_mmap(), which will set things
  1161. * up so we can get faults in the handler above.
  1162. *
  1163. * The fault handler will take care of binding the object into the GTT
  1164. * (since it may have been evicted to make room for something), allocating
  1165. * a fence register, and mapping the appropriate aperture address into
  1166. * userspace.
  1167. */
  1168. int
  1169. i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
  1170. struct drm_file *file_priv)
  1171. {
  1172. struct drm_i915_gem_mmap_gtt *args = data;
  1173. struct drm_gem_object *obj;
  1174. struct drm_i915_gem_object *obj_priv;
  1175. int ret;
  1176. if (!(dev->driver->driver_features & DRIVER_GEM))
  1177. return -ENODEV;
  1178. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  1179. if (obj == NULL)
  1180. return -ENOENT;
  1181. mutex_lock(&dev->struct_mutex);
  1182. obj_priv = to_intel_bo(obj);
  1183. if (obj_priv->madv != I915_MADV_WILLNEED) {
  1184. DRM_ERROR("Attempting to mmap a purgeable buffer\n");
  1185. drm_gem_object_unreference(obj);
  1186. mutex_unlock(&dev->struct_mutex);
  1187. return -EINVAL;
  1188. }
  1189. if (!obj_priv->mmap_offset) {
  1190. ret = i915_gem_create_mmap_offset(obj);
  1191. if (ret) {
  1192. drm_gem_object_unreference(obj);
  1193. mutex_unlock(&dev->struct_mutex);
  1194. return ret;
  1195. }
  1196. }
  1197. args->offset = obj_priv->mmap_offset;
  1198. /*
  1199. * Pull it into the GTT so that we have a page list (makes the
  1200. * initial fault faster and any subsequent flushing possible).
  1201. */
  1202. if (!obj_priv->agp_mem) {
  1203. ret = i915_gem_object_bind_to_gtt(obj, 0);
  1204. if (ret) {
  1205. drm_gem_object_unreference(obj);
  1206. mutex_unlock(&dev->struct_mutex);
  1207. return ret;
  1208. }
  1209. }
  1210. drm_gem_object_unreference(obj);
  1211. mutex_unlock(&dev->struct_mutex);
  1212. return 0;
  1213. }
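/*
 * Illustrative userspace flow for the GTT path (struct and ioctl names as
 * in i915_drm.h / libdrm at the time of writing; a sketch rather than a
 * definitive API reference):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * The mmap() itself is handled by drm_gem_mmap(), and the first access then
 * faults into i915_gem_fault() above.
 */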
  1214. void
  1215. i915_gem_object_put_pages(struct drm_gem_object *obj)
  1216. {
  1217. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1218. int page_count = obj->size / PAGE_SIZE;
  1219. int i;
  1220. BUG_ON(obj_priv->pages_refcount == 0);
  1221. BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
  1222. if (--obj_priv->pages_refcount != 0)
  1223. return;
  1224. if (obj_priv->tiling_mode != I915_TILING_NONE)
  1225. i915_gem_object_save_bit_17_swizzle(obj);
  1226. if (obj_priv->madv == I915_MADV_DONTNEED)
  1227. obj_priv->dirty = 0;
  1228. for (i = 0; i < page_count; i++) {
  1229. if (obj_priv->dirty)
  1230. set_page_dirty(obj_priv->pages[i]);
  1231. if (obj_priv->madv == I915_MADV_WILLNEED)
  1232. mark_page_accessed(obj_priv->pages[i]);
  1233. page_cache_release(obj_priv->pages[i]);
  1234. }
  1235. obj_priv->dirty = 0;
  1236. drm_free_large(obj_priv->pages);
  1237. obj_priv->pages = NULL;
  1238. }
  1239. static void
  1240. i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
  1241. struct intel_ring_buffer *ring)
  1242. {
  1243. struct drm_device *dev = obj->dev;
  1244. drm_i915_private_t *dev_priv = dev->dev_private;
  1245. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1246. BUG_ON(ring == NULL);
  1247. obj_priv->ring = ring;
  1248. /* Add a reference if we're newly entering the active list. */
  1249. if (!obj_priv->active) {
  1250. drm_gem_object_reference(obj);
  1251. obj_priv->active = 1;
  1252. }
  1253. /* Move from whatever list we were on to the tail of execution. */
  1254. spin_lock(&dev_priv->mm.active_list_lock);
  1255. list_move_tail(&obj_priv->list, &ring->active_list);
  1256. spin_unlock(&dev_priv->mm.active_list_lock);
  1257. obj_priv->last_rendering_seqno = seqno;
  1258. }
  1259. static void
  1260. i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
  1261. {
  1262. struct drm_device *dev = obj->dev;
  1263. drm_i915_private_t *dev_priv = dev->dev_private;
  1264. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1265. BUG_ON(!obj_priv->active);
  1266. list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
  1267. obj_priv->last_rendering_seqno = 0;
  1268. }
  1269. /* Immediately discard the backing storage */
  1270. static void
  1271. i915_gem_object_truncate(struct drm_gem_object *obj)
  1272. {
  1273. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1274. struct inode *inode;
  1275. /* Our goal here is to return as much of the memory as
  1276. * is possible back to the system as we are called from OOM.
  1277. * To do this we must instruct the shmfs to drop all of its
  1278. * backing pages, *now*. Here we mirror the actions taken
1279. * by shmem_delete_inode() to release the backing store.
  1280. */
  1281. inode = obj->filp->f_path.dentry->d_inode;
  1282. truncate_inode_pages(inode->i_mapping, 0);
  1283. if (inode->i_op->truncate_range)
  1284. inode->i_op->truncate_range(inode, 0, (loff_t)-1);
  1285. obj_priv->madv = __I915_MADV_PURGED;
  1286. }
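/*
 * In brief, the purge flow as visible in this file: buffers userspace has
 * marked I915_MADV_DONTNEED are treated as purgeable; when such a buffer is
 * unbound (see i915_gem_object_unbind() below) its shmem backing store is
 * dropped immediately via this function and the object latches into
 * __I915_MADV_PURGED, after which attempts to bind it or create a GTT
 * mapping for it are refused rather than risking access to stale pages.
 */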
  1287. static inline int
  1288. i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
  1289. {
  1290. return obj_priv->madv == I915_MADV_DONTNEED;
  1291. }
  1292. static void
  1293. i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
  1294. {
  1295. struct drm_device *dev = obj->dev;
  1296. drm_i915_private_t *dev_priv = dev->dev_private;
  1297. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1298. i915_verify_inactive(dev, __FILE__, __LINE__);
  1299. if (obj_priv->pin_count != 0)
  1300. list_del_init(&obj_priv->list);
  1301. else
  1302. list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  1303. BUG_ON(!list_empty(&obj_priv->gpu_write_list));
  1304. obj_priv->last_rendering_seqno = 0;
  1305. obj_priv->ring = NULL;
  1306. if (obj_priv->active) {
  1307. obj_priv->active = 0;
  1308. drm_gem_object_unreference(obj);
  1309. }
  1310. i915_verify_inactive(dev, __FILE__, __LINE__);
  1311. }
  1312. static void
  1313. i915_gem_process_flushing_list(struct drm_device *dev,
  1314. uint32_t flush_domains, uint32_t seqno,
  1315. struct intel_ring_buffer *ring)
  1316. {
  1317. drm_i915_private_t *dev_priv = dev->dev_private;
  1318. struct drm_i915_gem_object *obj_priv, *next;
  1319. list_for_each_entry_safe(obj_priv, next,
  1320. &dev_priv->mm.gpu_write_list,
  1321. gpu_write_list) {
  1322. struct drm_gem_object *obj = &obj_priv->base;
  1323. if ((obj->write_domain & flush_domains) ==
  1324. obj->write_domain &&
  1325. obj_priv->ring->ring_flag == ring->ring_flag) {
  1326. uint32_t old_write_domain = obj->write_domain;
  1327. obj->write_domain = 0;
  1328. list_del_init(&obj_priv->gpu_write_list);
  1329. i915_gem_object_move_to_active(obj, seqno, ring);
  1330. /* update the fence lru list */
  1331. if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
  1332. struct drm_i915_fence_reg *reg =
  1333. &dev_priv->fence_regs[obj_priv->fence_reg];
  1334. list_move_tail(&reg->lru_list,
  1335. &dev_priv->mm.fence_list);
  1336. }
  1337. trace_i915_gem_object_change_domain(obj,
  1338. obj->read_domains,
  1339. old_write_domain);
  1340. }
  1341. }
  1342. }
  1343. uint32_t
  1344. i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  1345. uint32_t flush_domains, struct intel_ring_buffer *ring)
  1346. {
  1347. drm_i915_private_t *dev_priv = dev->dev_private;
  1348. struct drm_i915_file_private *i915_file_priv = NULL;
  1349. struct drm_i915_gem_request *request;
  1350. uint32_t seqno;
  1351. int was_empty;
  1352. if (file_priv != NULL)
  1353. i915_file_priv = file_priv->driver_priv;
  1354. request = kzalloc(sizeof(*request), GFP_KERNEL);
  1355. if (request == NULL)
  1356. return 0;
  1357. seqno = ring->add_request(dev, ring, file_priv, flush_domains);
  1358. request->seqno = seqno;
  1359. request->ring = ring;
  1360. request->emitted_jiffies = jiffies;
  1361. was_empty = list_empty(&ring->request_list);
  1362. list_add_tail(&request->list, &ring->request_list);
  1363. if (i915_file_priv) {
  1364. list_add_tail(&request->client_list,
  1365. &i915_file_priv->mm.request_list);
  1366. } else {
  1367. INIT_LIST_HEAD(&request->client_list);
  1368. }
  1369. /* Associate any objects on the flushing list matching the write
  1370. * domain we're flushing with our flush.
  1371. */
  1372. if (flush_domains != 0)
  1373. i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
  1374. if (!dev_priv->mm.suspended) {
  1375. mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
  1376. if (was_empty)
  1377. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
  1378. }
  1379. return seqno;
  1380. }
  1381. /**
  1382. * Command execution barrier
  1383. *
  1384. * Ensures that all commands in the ring are finished
  1385. * before signalling the CPU
  1386. */
  1387. static uint32_t
  1388. i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
  1389. {
  1390. uint32_t flush_domains = 0;
  1391. /* The sampler always gets flushed on i965 (sigh) */
  1392. if (IS_I965G(dev))
  1393. flush_domains |= I915_GEM_DOMAIN_SAMPLER;
  1394. ring->flush(dev, ring,
  1395. I915_GEM_DOMAIN_COMMAND, flush_domains);
  1396. return flush_domains;
  1397. }
  1398. /**
  1399. * Moves buffers associated only with the given active seqno from the active
  1400. * to inactive list, potentially freeing them.
  1401. */
  1402. static void
  1403. i915_gem_retire_request(struct drm_device *dev,
  1404. struct drm_i915_gem_request *request)
  1405. {
  1406. drm_i915_private_t *dev_priv = dev->dev_private;
  1407. trace_i915_gem_request_retire(dev, request->seqno);
  1408. /* Move any buffers on the active list that are no longer referenced
  1409. * by the ringbuffer to the flushing/inactive lists as appropriate.
  1410. */
  1411. spin_lock(&dev_priv->mm.active_list_lock);
  1412. while (!list_empty(&request->ring->active_list)) {
  1413. struct drm_gem_object *obj;
  1414. struct drm_i915_gem_object *obj_priv;
  1415. obj_priv = list_first_entry(&request->ring->active_list,
  1416. struct drm_i915_gem_object,
  1417. list);
  1418. obj = &obj_priv->base;
  1419. /* If the seqno being retired doesn't match the oldest in the
  1420. * list, then the oldest in the list must still be newer than
  1421. * this seqno.
  1422. */
  1423. if (obj_priv->last_rendering_seqno != request->seqno)
  1424. goto out;
  1425. #if WATCH_LRU
  1426. DRM_INFO("%s: retire %d moves to inactive list %p\n",
  1427. __func__, request->seqno, obj);
  1428. #endif
  1429. if (obj->write_domain != 0)
  1430. i915_gem_object_move_to_flushing(obj);
  1431. else {
  1432. /* Take a reference on the object so it won't be
  1433. * freed while the spinlock is held. The list
  1434. * protection for this spinlock is safe when breaking
  1435. * the lock like this since the next thing we do
  1436. * is just get the head of the list again.
  1437. */
  1438. drm_gem_object_reference(obj);
  1439. i915_gem_object_move_to_inactive(obj);
  1440. spin_unlock(&dev_priv->mm.active_list_lock);
  1441. drm_gem_object_unreference(obj);
  1442. spin_lock(&dev_priv->mm.active_list_lock);
  1443. }
  1444. }
  1445. out:
  1446. spin_unlock(&dev_priv->mm.active_list_lock);
  1447. }
  1448. /**
  1449. * Returns true if seq1 is later than seq2.
  1450. */
  1451. bool
  1452. i915_seqno_passed(uint32_t seq1, uint32_t seq2)
  1453. {
  1454. return (int32_t)(seq1 - seq2) >= 0;
  1455. }
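/*
 * The signed subtraction above is what makes the comparison safe across
 * 32-bit seqno wrap.  Worked example (illustrative values):
 *
 *	i915_seqno_passed(0x00000002, 0xfffffffd)
 *		=> (int32_t)(0x00000002 - 0xfffffffd) = 5 >= 0 => true
 *
 * i.e. 0x00000002 counts as "later" even though it is numerically smaller,
 * which is correct as long as outstanding seqnos span less than 2^31.
 */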
  1456. uint32_t
  1457. i915_get_gem_seqno(struct drm_device *dev,
  1458. struct intel_ring_buffer *ring)
  1459. {
  1460. return ring->get_gem_seqno(dev, ring);
  1461. }
  1462. /**
  1463. * This function clears the request list as sequence numbers are passed.
  1464. */
  1465. static void
  1466. i915_gem_retire_requests_ring(struct drm_device *dev,
  1467. struct intel_ring_buffer *ring)
  1468. {
  1469. drm_i915_private_t *dev_priv = dev->dev_private;
  1470. uint32_t seqno;
  1471. if (!ring->status_page.page_addr
  1472. || list_empty(&ring->request_list))
  1473. return;
  1474. seqno = i915_get_gem_seqno(dev, ring);
  1475. while (!list_empty(&ring->request_list)) {
  1476. struct drm_i915_gem_request *request;
  1477. uint32_t retiring_seqno;
  1478. request = list_first_entry(&ring->request_list,
  1479. struct drm_i915_gem_request,
  1480. list);
  1481. retiring_seqno = request->seqno;
  1482. if (i915_seqno_passed(seqno, retiring_seqno) ||
  1483. atomic_read(&dev_priv->mm.wedged)) {
  1484. i915_gem_retire_request(dev, request);
  1485. list_del(&request->list);
  1486. list_del(&request->client_list);
  1487. kfree(request);
  1488. } else
  1489. break;
  1490. }
1491. if (unlikely(dev_priv->trace_irq_seqno &&
  1492. i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
  1493. ring->user_irq_put(dev, ring);
  1494. dev_priv->trace_irq_seqno = 0;
  1495. }
  1496. }
  1497. void
  1498. i915_gem_retire_requests(struct drm_device *dev)
  1499. {
  1500. drm_i915_private_t *dev_priv = dev->dev_private;
  1501. if (!list_empty(&dev_priv->mm.deferred_free_list)) {
  1502. struct drm_i915_gem_object *obj_priv, *tmp;
  1503. /* We must be careful that during unbind() we do not
  1504. * accidentally infinitely recurse into retire requests.
  1505. * Currently:
  1506. * retire -> free -> unbind -> wait -> retire_ring
  1507. */
  1508. list_for_each_entry_safe(obj_priv, tmp,
  1509. &dev_priv->mm.deferred_free_list,
  1510. list)
  1511. i915_gem_free_object_tail(&obj_priv->base);
  1512. }
  1513. i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
  1514. if (HAS_BSD(dev))
  1515. i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
  1516. }
  1517. void
  1518. i915_gem_retire_work_handler(struct work_struct *work)
  1519. {
  1520. drm_i915_private_t *dev_priv;
  1521. struct drm_device *dev;
  1522. dev_priv = container_of(work, drm_i915_private_t,
  1523. mm.retire_work.work);
  1524. dev = dev_priv->dev;
  1525. mutex_lock(&dev->struct_mutex);
  1526. i915_gem_retire_requests(dev);
  1527. if (!dev_priv->mm.suspended &&
  1528. (!list_empty(&dev_priv->render_ring.request_list) ||
  1529. (HAS_BSD(dev) &&
  1530. !list_empty(&dev_priv->bsd_ring.request_list))))
  1531. queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
  1532. mutex_unlock(&dev->struct_mutex);
  1533. }
  1534. int
  1535. i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
  1536. int interruptible, struct intel_ring_buffer *ring)
  1537. {
  1538. drm_i915_private_t *dev_priv = dev->dev_private;
  1539. u32 ier;
  1540. int ret = 0;
  1541. BUG_ON(seqno == 0);
  1542. if (atomic_read(&dev_priv->mm.wedged))
  1543. return -EIO;
  1544. if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
  1545. if (HAS_PCH_SPLIT(dev))
  1546. ier = I915_READ(DEIER) | I915_READ(GTIER);
  1547. else
  1548. ier = I915_READ(IER);
  1549. if (!ier) {
  1550. DRM_ERROR("something (likely vbetool) disabled "
  1551. "interrupts, re-enabling\n");
  1552. i915_driver_irq_preinstall(dev);
  1553. i915_driver_irq_postinstall(dev);
  1554. }
  1555. trace_i915_gem_request_wait_begin(dev, seqno);
  1556. ring->waiting_gem_seqno = seqno;
  1557. ring->user_irq_get(dev, ring);
  1558. if (interruptible)
  1559. ret = wait_event_interruptible(ring->irq_queue,
  1560. i915_seqno_passed(
  1561. ring->get_gem_seqno(dev, ring), seqno)
  1562. || atomic_read(&dev_priv->mm.wedged));
  1563. else
  1564. wait_event(ring->irq_queue,
  1565. i915_seqno_passed(
  1566. ring->get_gem_seqno(dev, ring), seqno)
  1567. || atomic_read(&dev_priv->mm.wedged));
  1568. ring->user_irq_put(dev, ring);
  1569. ring->waiting_gem_seqno = 0;
  1570. trace_i915_gem_request_wait_end(dev, seqno);
  1571. }
  1572. if (atomic_read(&dev_priv->mm.wedged))
  1573. ret = -EIO;
  1574. if (ret && ret != -ERESTARTSYS)
  1575. DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
  1576. __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
  1577. /* Directly dispatch request retiring. While we have the work queue
  1578. * to handle this, the waiter on a request often wants an associated
  1579. * buffer to have made it to the inactive list, and we would need
  1580. * a separate wait queue to handle that.
  1581. */
  1582. if (ret == 0)
  1583. i915_gem_retire_requests_ring(dev, ring);
  1584. return ret;
  1585. }
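/*
 * Typical caller pattern, sketched from the helpers in this file (see
 * i915_gpu_idle() below for the canonical in-tree example): flush the GPU
 * domains, emit a request that will signal a fresh seqno, then block until
 * the ring reports that seqno.
 *
 *	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 *	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, ring);
 *	if (seqno == 0)
 *		return -ENOMEM;
 *	ret = i915_do_wait_request(dev, seqno, 1, ring);
 */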
  1586. /**
  1587. * Waits for a sequence number to be signaled, and cleans up the
  1588. * request and object lists appropriately for that event.
  1589. */
  1590. static int
  1591. i915_wait_request(struct drm_device *dev, uint32_t seqno,
  1592. struct intel_ring_buffer *ring)
  1593. {
  1594. return i915_do_wait_request(dev, seqno, 1, ring);
  1595. }
  1596. static void
  1597. i915_gem_flush(struct drm_device *dev,
  1598. uint32_t invalidate_domains,
  1599. uint32_t flush_domains)
  1600. {
  1601. drm_i915_private_t *dev_priv = dev->dev_private;
  1602. if (flush_domains & I915_GEM_DOMAIN_CPU)
  1603. drm_agp_chipset_flush(dev);
  1604. dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
  1605. invalidate_domains,
  1606. flush_domains);
  1607. if (HAS_BSD(dev))
  1608. dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
  1609. invalidate_domains,
  1610. flush_domains);
  1611. }
  1612. /**
  1613. * Ensures that all rendering to the object has completed and the object is
  1614. * safe to unbind from the GTT or access from the CPU.
  1615. */
  1616. static int
  1617. i915_gem_object_wait_rendering(struct drm_gem_object *obj)
  1618. {
  1619. struct drm_device *dev = obj->dev;
  1620. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1621. int ret;
  1622. /* This function only exists to support waiting for existing rendering,
  1623. * not for emitting required flushes.
  1624. */
  1625. BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
  1626. /* If there is rendering queued on the buffer being evicted, wait for
  1627. * it.
  1628. */
  1629. if (obj_priv->active) {
  1630. #if WATCH_BUF
  1631. DRM_INFO("%s: object %p wait for seqno %08x\n",
  1632. __func__, obj, obj_priv->last_rendering_seqno);
  1633. #endif
  1634. ret = i915_wait_request(dev,
  1635. obj_priv->last_rendering_seqno, obj_priv->ring);
  1636. if (ret != 0)
  1637. return ret;
  1638. }
  1639. return 0;
  1640. }
  1641. /**
  1642. * Unbinds an object from the GTT aperture.
  1643. */
  1644. int
  1645. i915_gem_object_unbind(struct drm_gem_object *obj)
  1646. {
  1647. struct drm_device *dev = obj->dev;
  1648. drm_i915_private_t *dev_priv = dev->dev_private;
  1649. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1650. int ret = 0;
  1651. #if WATCH_BUF
  1652. DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
  1653. DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
  1654. #endif
  1655. if (obj_priv->gtt_space == NULL)
  1656. return 0;
  1657. if (obj_priv->pin_count != 0) {
  1658. DRM_ERROR("Attempting to unbind pinned buffer\n");
  1659. return -EINVAL;
  1660. }
  1661. /* blow away mappings if mapped through GTT */
  1662. i915_gem_release_mmap(obj);
  1663. /* Move the object to the CPU domain to ensure that
  1664. * any possible CPU writes while it's not in the GTT
  1665. * are flushed when we go to remap it. This will
  1666. * also ensure that all pending GPU writes are finished
  1667. * before we unbind.
  1668. */
  1669. ret = i915_gem_object_set_to_cpu_domain(obj, 1);
  1670. if (ret == -ERESTARTSYS)
  1671. return ret;
  1672. /* Continue on if we fail due to EIO, the GPU is hung so we
  1673. * should be safe and we need to cleanup or else we might
  1674. * cause memory corruption through use-after-free.
  1675. */
  1676. /* release the fence reg _after_ flushing */
  1677. if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
  1678. i915_gem_clear_fence_reg(obj);
  1679. if (obj_priv->agp_mem != NULL) {
  1680. drm_unbind_agp(obj_priv->agp_mem);
  1681. drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
  1682. obj_priv->agp_mem = NULL;
  1683. }
  1684. i915_gem_object_put_pages(obj);
  1685. BUG_ON(obj_priv->pages_refcount);
  1686. if (obj_priv->gtt_space) {
  1687. atomic_dec(&dev->gtt_count);
  1688. atomic_sub(obj->size, &dev->gtt_memory);
  1689. drm_mm_put_block(obj_priv->gtt_space);
  1690. obj_priv->gtt_space = NULL;
  1691. }
  1692. /* Remove ourselves from the LRU list if present. */
  1693. spin_lock(&dev_priv->mm.active_list_lock);
  1694. if (!list_empty(&obj_priv->list))
  1695. list_del_init(&obj_priv->list);
  1696. spin_unlock(&dev_priv->mm.active_list_lock);
  1697. if (i915_gem_object_is_purgeable(obj_priv))
  1698. i915_gem_object_truncate(obj);
  1699. trace_i915_gem_object_unbind(obj);
  1700. return ret;
  1701. }
  1702. int
  1703. i915_gpu_idle(struct drm_device *dev)
  1704. {
  1705. drm_i915_private_t *dev_priv = dev->dev_private;
  1706. bool lists_empty;
  1707. uint32_t seqno1, seqno2;
  1708. int ret;
  1709. spin_lock(&dev_priv->mm.active_list_lock);
  1710. lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
  1711. list_empty(&dev_priv->render_ring.active_list) &&
  1712. (!HAS_BSD(dev) ||
  1713. list_empty(&dev_priv->bsd_ring.active_list)));
  1714. spin_unlock(&dev_priv->mm.active_list_lock);
  1715. if (lists_empty)
  1716. return 0;
  1717. /* Flush everything onto the inactive list. */
  1718. i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
  1719. seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
  1720. &dev_priv->render_ring);
  1721. if (seqno1 == 0)
  1722. return -ENOMEM;
  1723. ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
  1724. if (HAS_BSD(dev)) {
  1725. seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
  1726. &dev_priv->bsd_ring);
  1727. if (seqno2 == 0)
  1728. return -ENOMEM;
  1729. ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
  1730. if (ret)
  1731. return ret;
  1732. }
  1733. return ret;
  1734. }
  1735. int
  1736. i915_gem_object_get_pages(struct drm_gem_object *obj,
  1737. gfp_t gfpmask)
  1738. {
  1739. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1740. int page_count, i;
  1741. struct address_space *mapping;
  1742. struct inode *inode;
  1743. struct page *page;
  1744. BUG_ON(obj_priv->pages_refcount
  1745. == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
  1746. if (obj_priv->pages_refcount++ != 0)
  1747. return 0;
  1748. /* Get the list of pages out of our struct file. They'll be pinned
  1749. * at this point until we release them.
  1750. */
  1751. page_count = obj->size / PAGE_SIZE;
  1752. BUG_ON(obj_priv->pages != NULL);
  1753. obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
  1754. if (obj_priv->pages == NULL) {
  1755. obj_priv->pages_refcount--;
  1756. return -ENOMEM;
  1757. }
  1758. inode = obj->filp->f_path.dentry->d_inode;
  1759. mapping = inode->i_mapping;
  1760. for (i = 0; i < page_count; i++) {
  1761. page = read_cache_page_gfp(mapping, i,
  1762. GFP_HIGHUSER |
  1763. __GFP_COLD |
  1764. __GFP_RECLAIMABLE |
  1765. gfpmask);
  1766. if (IS_ERR(page))
  1767. goto err_pages;
  1768. obj_priv->pages[i] = page;
  1769. }
  1770. if (obj_priv->tiling_mode != I915_TILING_NONE)
  1771. i915_gem_object_do_bit_17_swizzle(obj);
  1772. return 0;
  1773. err_pages:
  1774. while (i--)
  1775. page_cache_release(obj_priv->pages[i]);
  1776. drm_free_large(obj_priv->pages);
  1777. obj_priv->pages = NULL;
  1778. obj_priv->pages_refcount--;
  1779. return PTR_ERR(page);
  1780. }
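/*
 * get_pages()/put_pages() form a refcounted pair.  A sketch of the expected
 * usage, mirroring what bind_to_gtt()/unbind() in this file actually do:
 *
 *	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 *	if (ret)
 *		return ret;
 *	... access obj_priv->pages[] ...
 *	i915_gem_object_put_pages(obj);
 *
 * Only the 0 -> 1 transition actually populates the array from shmem, and
 * only the final put releases the page references back to the page cache.
 */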
  1781. static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
  1782. {
  1783. struct drm_gem_object *obj = reg->obj;
  1784. struct drm_device *dev = obj->dev;
  1785. drm_i915_private_t *dev_priv = dev->dev_private;
  1786. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1787. int regnum = obj_priv->fence_reg;
  1788. uint64_t val;
  1789. val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
  1790. 0xfffff000) << 32;
  1791. val |= obj_priv->gtt_offset & 0xfffff000;
  1792. val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
  1793. SANDYBRIDGE_FENCE_PITCH_SHIFT;
  1794. if (obj_priv->tiling_mode == I915_TILING_Y)
  1795. val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  1796. val |= I965_FENCE_REG_VALID;
  1797. I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
  1798. }
  1799. static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
  1800. {
  1801. struct drm_gem_object *obj = reg->obj;
  1802. struct drm_device *dev = obj->dev;
  1803. drm_i915_private_t *dev_priv = dev->dev_private;
  1804. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1805. int regnum = obj_priv->fence_reg;
  1806. uint64_t val;
  1807. val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
  1808. 0xfffff000) << 32;
  1809. val |= obj_priv->gtt_offset & 0xfffff000;
  1810. val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
  1811. if (obj_priv->tiling_mode == I915_TILING_Y)
  1812. val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  1813. val |= I965_FENCE_REG_VALID;
  1814. I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
  1815. }
  1816. static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
  1817. {
  1818. struct drm_gem_object *obj = reg->obj;
  1819. struct drm_device *dev = obj->dev;
  1820. drm_i915_private_t *dev_priv = dev->dev_private;
  1821. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1822. int regnum = obj_priv->fence_reg;
  1823. int tile_width;
  1824. uint32_t fence_reg, val;
  1825. uint32_t pitch_val;
  1826. if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
  1827. (obj_priv->gtt_offset & (obj->size - 1))) {
  1828. WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
  1829. __func__, obj_priv->gtt_offset, obj->size);
  1830. return;
  1831. }
  1832. if (obj_priv->tiling_mode == I915_TILING_Y &&
  1833. HAS_128_BYTE_Y_TILING(dev))
  1834. tile_width = 128;
  1835. else
  1836. tile_width = 512;
  1837. /* Note: pitch better be a power of two tile widths */
  1838. pitch_val = obj_priv->stride / tile_width;
  1839. pitch_val = ffs(pitch_val) - 1;
  1840. if (obj_priv->tiling_mode == I915_TILING_Y &&
  1841. HAS_128_BYTE_Y_TILING(dev))
  1842. WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
  1843. else
  1844. WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
  1845. val = obj_priv->gtt_offset;
  1846. if (obj_priv->tiling_mode == I915_TILING_Y)
  1847. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  1848. val |= I915_FENCE_SIZE_BITS(obj->size);
  1849. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  1850. val |= I830_FENCE_REG_VALID;
  1851. if (regnum < 8)
  1852. fence_reg = FENCE_REG_830_0 + (regnum * 4);
  1853. else
  1854. fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
  1855. I915_WRITE(fence_reg, val);
  1856. }
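/*
 * Worked example of the pitch encoding above (illustrative numbers): an
 * X-tiled object with a 2048-byte stride uses 512-byte-wide tiles, so
 * pitch_val = 2048 / 512 = 4 and ffs(4) - 1 = 2 is what lands in the fence
 * register's pitch field.  The log2 encoding is also why the stride must be
 * a power-of-two number of tile widths, as the comment above notes.
 */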
  1857. static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
  1858. {
  1859. struct drm_gem_object *obj = reg->obj;
  1860. struct drm_device *dev = obj->dev;
  1861. drm_i915_private_t *dev_priv = dev->dev_private;
  1862. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1863. int regnum = obj_priv->fence_reg;
  1864. uint32_t val;
  1865. uint32_t pitch_val;
  1866. uint32_t fence_size_bits;
  1867. if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
  1868. (obj_priv->gtt_offset & (obj->size - 1))) {
  1869. WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
  1870. __func__, obj_priv->gtt_offset);
  1871. return;
  1872. }
  1873. pitch_val = obj_priv->stride / 128;
  1874. pitch_val = ffs(pitch_val) - 1;
  1875. WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
  1876. val = obj_priv->gtt_offset;
  1877. if (obj_priv->tiling_mode == I915_TILING_Y)
  1878. val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  1879. fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
  1880. WARN_ON(fence_size_bits & ~0x00000f00);
  1881. val |= fence_size_bits;
  1882. val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  1883. val |= I830_FENCE_REG_VALID;
  1884. I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
  1885. }
  1886. static int i915_find_fence_reg(struct drm_device *dev)
  1887. {
  1888. struct drm_i915_fence_reg *reg = NULL;
  1889. struct drm_i915_gem_object *obj_priv = NULL;
  1890. struct drm_i915_private *dev_priv = dev->dev_private;
  1891. struct drm_gem_object *obj = NULL;
  1892. int i, avail, ret;
  1893. /* First try to find a free reg */
  1894. avail = 0;
  1895. for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
  1896. reg = &dev_priv->fence_regs[i];
  1897. if (!reg->obj)
  1898. return i;
  1899. obj_priv = to_intel_bo(reg->obj);
  1900. if (!obj_priv->pin_count)
  1901. avail++;
  1902. }
  1903. if (avail == 0)
  1904. return -ENOSPC;
  1905. /* None available, try to steal one or wait for a user to finish */
  1906. i = I915_FENCE_REG_NONE;
  1907. list_for_each_entry(reg, &dev_priv->mm.fence_list,
  1908. lru_list) {
  1909. obj = reg->obj;
  1910. obj_priv = to_intel_bo(obj);
  1911. if (obj_priv->pin_count)
  1912. continue;
  1913. /* found one! */
  1914. i = obj_priv->fence_reg;
  1915. break;
  1916. }
  1917. BUG_ON(i == I915_FENCE_REG_NONE);
  1918. /* We only have a reference on obj from the active list. put_fence_reg
  1919. * might drop that one, causing a use-after-free in it. So hold a
  1920. * private reference to obj like the other callers of put_fence_reg
  1921. * (set_tiling ioctl) do. */
  1922. drm_gem_object_reference(obj);
  1923. ret = i915_gem_object_put_fence_reg(obj);
  1924. drm_gem_object_unreference(obj);
  1925. if (ret != 0)
  1926. return ret;
  1927. return i;
  1928. }
  1929. /**
  1930. * i915_gem_object_get_fence_reg - set up a fence reg for an object
  1931. * @obj: object to map through a fence reg
  1932. *
  1933. * When mapping objects through the GTT, userspace wants to be able to write
  1934. * to them without having to worry about swizzling if the object is tiled.
  1935. *
  1936. * This function walks the fence regs looking for a free one for @obj,
  1937. * stealing one if it can't find any.
  1938. *
  1939. * It then sets up the reg based on the object's properties: address, pitch
  1940. * and tiling format.
  1941. */
  1942. int
  1943. i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
  1944. {
  1945. struct drm_device *dev = obj->dev;
  1946. struct drm_i915_private *dev_priv = dev->dev_private;
  1947. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  1948. struct drm_i915_fence_reg *reg = NULL;
  1949. int ret;
  1950. /* Just update our place in the LRU if our fence is getting used. */
  1951. if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
  1952. reg = &dev_priv->fence_regs[obj_priv->fence_reg];
  1953. list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
  1954. return 0;
  1955. }
  1956. switch (obj_priv->tiling_mode) {
  1957. case I915_TILING_NONE:
  1958. WARN(1, "allocating a fence for non-tiled object?\n");
  1959. break;
  1960. case I915_TILING_X:
  1961. if (!obj_priv->stride)
  1962. return -EINVAL;
  1963. WARN((obj_priv->stride & (512 - 1)),
  1964. "object 0x%08x is X tiled but has non-512B pitch\n",
  1965. obj_priv->gtt_offset);
  1966. break;
  1967. case I915_TILING_Y:
  1968. if (!obj_priv->stride)
  1969. return -EINVAL;
  1970. WARN((obj_priv->stride & (128 - 1)),
  1971. "object 0x%08x is Y tiled but has non-128B pitch\n",
  1972. obj_priv->gtt_offset);
  1973. break;
  1974. }
  1975. ret = i915_find_fence_reg(dev);
  1976. if (ret < 0)
  1977. return ret;
  1978. obj_priv->fence_reg = ret;
  1979. reg = &dev_priv->fence_regs[obj_priv->fence_reg];
  1980. list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
  1981. reg->obj = obj;
  1982. if (IS_GEN6(dev))
  1983. sandybridge_write_fence_reg(reg);
  1984. else if (IS_I965G(dev))
  1985. i965_write_fence_reg(reg);
  1986. else if (IS_I9XX(dev))
  1987. i915_write_fence_reg(reg);
  1988. else
  1989. i830_write_fence_reg(reg);
  1990. trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
  1991. obj_priv->tiling_mode);
  1992. return 0;
  1993. }
  1994. /**
  1995. * i915_gem_clear_fence_reg - clear out fence register info
  1996. * @obj: object to clear
  1997. *
  1998. * Zeroes out the fence register itself and clears out the associated
  1999. * data structures in dev_priv and obj_priv.
  2000. */
  2001. static void
  2002. i915_gem_clear_fence_reg(struct drm_gem_object *obj)
  2003. {
  2004. struct drm_device *dev = obj->dev;
  2005. drm_i915_private_t *dev_priv = dev->dev_private;
  2006. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  2007. struct drm_i915_fence_reg *reg =
  2008. &dev_priv->fence_regs[obj_priv->fence_reg];
  2009. if (IS_GEN6(dev)) {
  2010. I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
  2011. (obj_priv->fence_reg * 8), 0);
  2012. } else if (IS_I965G(dev)) {
  2013. I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
  2014. } else {
  2015. uint32_t fence_reg;
  2016. if (obj_priv->fence_reg < 8)
  2017. fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
  2018. else
  2019. fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
  2020. 8) * 4;
  2021. I915_WRITE(fence_reg, 0);
  2022. }
  2023. reg->obj = NULL;
  2024. obj_priv->fence_reg = I915_FENCE_REG_NONE;
  2025. list_del_init(&reg->lru_list);
  2026. }
  2027. /**
  2028. * i915_gem_object_put_fence_reg - waits on outstanding fenced access
  2029. * to the buffer to finish, and then resets the fence register.
  2030. * @obj: tiled object holding a fence register.
  2031. *
  2032. * Zeroes out the fence register itself and clears out the associated
  2033. * data structures in dev_priv and obj_priv.
  2034. */
  2035. int
  2036. i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
  2037. {
  2038. struct drm_device *dev = obj->dev;
  2039. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  2040. if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
  2041. return 0;
  2042. /* If we've changed tiling, GTT-mappings of the object
  2043. * need to re-fault to ensure that the correct fence register
  2044. * setup is in place.
  2045. */
  2046. i915_gem_release_mmap(obj);
  2047. /* On the i915, GPU access to tiled buffers is via a fence,
  2048. * therefore we must wait for any outstanding access to complete
  2049. * before clearing the fence.
  2050. */
  2051. if (!IS_I965G(dev)) {
  2052. int ret;
  2053. ret = i915_gem_object_flush_gpu_write_domain(obj);
  2054. if (ret != 0)
  2055. return ret;
  2056. ret = i915_gem_object_wait_rendering(obj);
  2057. if (ret != 0)
  2058. return ret;
  2059. }
  2060. i915_gem_object_flush_gtt_write_domain(obj);
2061. i915_gem_clear_fence_reg(obj);
  2062. return 0;
  2063. }
  2064. /**
  2065. * Finds free space in the GTT aperture and binds the object there.
  2066. */
  2067. static int
  2068. i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
  2069. {
  2070. struct drm_device *dev = obj->dev;
  2071. drm_i915_private_t *dev_priv = dev->dev_private;
  2072. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  2073. struct drm_mm_node *free_space;
  2074. gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
  2075. int ret;
  2076. if (obj_priv->madv != I915_MADV_WILLNEED) {
  2077. DRM_ERROR("Attempting to bind a purgeable object\n");
  2078. return -EINVAL;
  2079. }
  2080. if (alignment == 0)
  2081. alignment = i915_gem_get_gtt_alignment(obj);
  2082. if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
  2083. DRM_ERROR("Invalid object alignment requested %u\n", alignment);
  2084. return -EINVAL;
  2085. }
  2086. /* If the object is bigger than the entire aperture, reject it early
  2087. * before evicting everything in a vain attempt to find space.
  2088. */
  2089. if (obj->size > dev->gtt_total) {
  2090. DRM_ERROR("Attempting to bind an object larger than the aperture\n");
  2091. return -E2BIG;
  2092. }
  2093. search_free:
  2094. free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
  2095. obj->size, alignment, 0);
  2096. if (free_space != NULL) {
  2097. obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
  2098. alignment);
  2099. if (obj_priv->gtt_space != NULL)
  2100. obj_priv->gtt_offset = obj_priv->gtt_space->start;
  2101. }
  2102. if (obj_priv->gtt_space == NULL) {
  2103. /* If the gtt is empty and we're still having trouble
  2104. * fitting our object in, we're out of memory.
  2105. */
  2106. #if WATCH_LRU
  2107. DRM_INFO("%s: GTT full, evicting something\n", __func__);
  2108. #endif
  2109. ret = i915_gem_evict_something(dev, obj->size, alignment);
  2110. if (ret)
  2111. return ret;
  2112. goto search_free;
  2113. }
  2114. #if WATCH_BUF
  2115. DRM_INFO("Binding object of size %zd at 0x%08x\n",
  2116. obj->size, obj_priv->gtt_offset);
  2117. #endif
  2118. ret = i915_gem_object_get_pages(obj, gfpmask);
  2119. if (ret) {
  2120. drm_mm_put_block(obj_priv->gtt_space);
  2121. obj_priv->gtt_space = NULL;
  2122. if (ret == -ENOMEM) {
  2123. /* first try to clear up some space from the GTT */
  2124. ret = i915_gem_evict_something(dev, obj->size,
  2125. alignment);
  2126. if (ret) {
  2127. /* now try to shrink everyone else */
  2128. if (gfpmask) {
  2129. gfpmask = 0;
  2130. goto search_free;
  2131. }
  2132. return ret;
  2133. }
  2134. goto search_free;
  2135. }
  2136. return ret;
  2137. }
  2138. /* Create an AGP memory structure pointing at our pages, and bind it
  2139. * into the GTT.
  2140. */
  2141. obj_priv->agp_mem = drm_agp_bind_pages(dev,
  2142. obj_priv->pages,
  2143. obj->size >> PAGE_SHIFT,
  2144. obj_priv->gtt_offset,
  2145. obj_priv->agp_type);
  2146. if (obj_priv->agp_mem == NULL) {
  2147. i915_gem_object_put_pages(obj);
  2148. drm_mm_put_block(obj_priv->gtt_space);
  2149. obj_priv->gtt_space = NULL;
  2150. ret = i915_gem_evict_something(dev, obj->size, alignment);
  2151. if (ret)
  2152. return ret;
  2153. goto search_free;
  2154. }
  2155. atomic_inc(&dev->gtt_count);
  2156. atomic_add(obj->size, &dev->gtt_memory);
2157. /* keep track of the bound object by adding it to the inactive list */
  2158. list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  2159. /* Assert that the object is not currently in any GPU domain. As it
  2160. * wasn't in the GTT, there shouldn't be any way it could have been in
  2161. * a GPU cache
  2162. */
  2163. BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
  2164. BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
  2165. trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
  2166. return 0;
  2167. }
  2168. void
  2169. i915_gem_clflush_object(struct drm_gem_object *obj)
  2170. {
  2171. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  2172. /* If we don't have a page list set up, then we're not pinned
  2173. * to GPU, and we can ignore the cache flush because it'll happen
  2174. * again at bind time.
  2175. */
  2176. if (obj_priv->pages == NULL)
  2177. return;
  2178. trace_i915_gem_object_clflush(obj);
  2179. drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
  2180. }
  2181. /** Flushes any GPU write domain for the object if it's dirty. */
  2182. static int
  2183. i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
  2184. {
  2185. struct drm_device *dev = obj->dev;
  2186. uint32_t old_write_domain;
  2187. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  2188. if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
  2189. return 0;
  2190. /* Queue the GPU write cache flushing we need. */
  2191. old_write_domain = obj->write_domain;
  2192. i915_gem_flush(dev, 0, obj->write_domain);
  2193. if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
  2194. return -ENOMEM;
  2195. trace_i915_gem_object_change_domain(obj,
  2196. obj->read_domains,
  2197. old_write_domain);
  2198. return 0;
  2199. }
  2200. /** Flushes the GTT write domain for the object if it's dirty. */
  2201. static void
  2202. i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
  2203. {
  2204. uint32_t old_write_domain;
  2205. if (obj->write_domain != I915_GEM_DOMAIN_GTT)
  2206. return;
  2207. /* No actual flushing is required for the GTT write domain. Writes
  2208. * to it immediately go to main memory as far as we know, so there's
  2209. * no chipset flush. It also doesn't land in render cache.
  2210. */
  2211. old_write_domain = obj->write_domain;
  2212. obj->write_domain = 0;
  2213. trace_i915_gem_object_change_domain(obj,
  2214. obj->read_domains,
  2215. old_write_domain);
  2216. }
  2217. /** Flushes the CPU write domain for the object if it's dirty. */
  2218. static void
  2219. i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
  2220. {
  2221. struct drm_device *dev = obj->dev;
  2222. uint32_t old_write_domain;
  2223. if (obj->write_domain != I915_GEM_DOMAIN_CPU)
  2224. return;
  2225. i915_gem_clflush_object(obj);
  2226. drm_agp_chipset_flush(dev);
  2227. old_write_domain = obj->write_domain;
  2228. obj->write_domain = 0;
  2229. trace_i915_gem_object_change_domain(obj,
  2230. obj->read_domains,
  2231. old_write_domain);
  2232. }
  2233. int
  2234. i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
  2235. {
  2236. int ret = 0;
  2237. switch (obj->write_domain) {
  2238. case I915_GEM_DOMAIN_GTT:
  2239. i915_gem_object_flush_gtt_write_domain(obj);
  2240. break;
  2241. case I915_GEM_DOMAIN_CPU:
  2242. i915_gem_object_flush_cpu_write_domain(obj);
  2243. break;
  2244. default:
  2245. ret = i915_gem_object_flush_gpu_write_domain(obj);
  2246. break;
  2247. }
  2248. return ret;
  2249. }
  2250. /**
  2251. * Moves a single object to the GTT read, and possibly write domain.
  2252. *
  2253. * This function returns when the move is complete, including waiting on
  2254. * flushes to occur.
  2255. */
  2256. int
  2257. i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  2258. {
  2259. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  2260. uint32_t old_write_domain, old_read_domains;
  2261. int ret;
  2262. /* Not valid to be called on unbound objects. */
  2263. if (obj_priv->gtt_space == NULL)
  2264. return -EINVAL;
  2265. ret = i915_gem_object_flush_gpu_write_domain(obj);
  2266. if (ret != 0)
  2267. return ret;
  2268. /* Wait on any GPU rendering and flushing to occur. */
  2269. ret = i915_gem_object_wait_rendering(obj);
  2270. if (ret != 0)
  2271. return ret;
  2272. old_write_domain = obj->write_domain;
  2273. old_read_domains = obj->read_domains;
  2274. /* If we're writing through the GTT domain, then CPU and GPU caches
  2275. * will need to be invalidated at next use.
  2276. */
  2277. if (write)
  2278. obj->read_domains &= I915_GEM_DOMAIN_GTT;
  2279. i915_gem_object_flush_cpu_write_domain(obj);
  2280. /* It should now be out of any other write domains, and we can update
  2281. * the domain values for our changes.
  2282. */
  2283. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  2284. obj->read_domains |= I915_GEM_DOMAIN_GTT;
  2285. if (write) {
  2286. obj->write_domain = I915_GEM_DOMAIN_GTT;
  2287. obj_priv->dirty = 1;
  2288. }
  2289. trace_i915_gem_object_change_domain(obj,
  2290. old_read_domains,
  2291. old_write_domain);
  2292. return 0;
  2293. }
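/*
 * Usage sketch: callers bind first, then switch domains.  The fault handler
 * above follows exactly this ordering before inserting the PFN (repeated
 * here only as an illustration of the expected call sequence):
 *
 *	ret = i915_gem_object_bind_to_gtt(obj, 0);
 *	if (ret == 0)
 *		ret = i915_gem_object_set_to_gtt_domain(obj, write);
 *
 * A write through the GTT marks the object dirty and shrinks its read
 * domains to GTT only, so subsequent CPU reads must come back through
 * i915_gem_object_set_to_cpu_domain().
 */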
  2294. /*
  2295. * Prepare buffer for display plane. Use uninterruptible for possible flush
2296. * wait, as during the modesetting process we're not supposed to be interrupted.
  2297. */
  2298. int
  2299. i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
  2300. {
  2301. struct drm_device *dev = obj->dev;
  2302. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  2303. uint32_t old_write_domain, old_read_domains;
  2304. int ret;
  2305. /* Not valid to be called on unbound objects. */
  2306. if (obj_priv->gtt_space == NULL)
  2307. return -EINVAL;
  2308. ret = i915_gem_object_flush_gpu_write_domain(obj);
  2309. if (ret)
  2310. return ret;
  2311. /* Wait on any GPU rendering and flushing to occur. */
  2312. if (obj_priv->active) {
  2313. #if WATCH_BUF
  2314. DRM_INFO("%s: object %p wait for seqno %08x\n",
  2315. __func__, obj, obj_priv->last_rendering_seqno);
  2316. #endif
  2317. ret = i915_do_wait_request(dev,
  2318. obj_priv->last_rendering_seqno,
  2319. 0,
  2320. obj_priv->ring);
  2321. if (ret != 0)
  2322. return ret;
  2323. }
  2324. i915_gem_object_flush_cpu_write_domain(obj);
  2325. old_write_domain = obj->write_domain;
  2326. old_read_domains = obj->read_domains;
  2327. /* It should now be out of any other write domains, and we can update
  2328. * the domain values for our changes.
  2329. */
  2330. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  2331. obj->read_domains = I915_GEM_DOMAIN_GTT;
  2332. obj->write_domain = I915_GEM_DOMAIN_GTT;
  2333. obj_priv->dirty = 1;
  2334. trace_i915_gem_object_change_domain(obj,
  2335. old_read_domains,
  2336. old_write_domain);
  2337. return 0;
  2338. }
  2339. /**
  2340. * Moves a single object to the CPU read, and possibly write domain.
  2341. *
  2342. * This function returns when the move is complete, including waiting on
  2343. * flushes to occur.
  2344. */
  2345. static int
  2346. i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  2347. {
  2348. uint32_t old_write_domain, old_read_domains;
  2349. int ret;
  2350. ret = i915_gem_object_flush_gpu_write_domain(obj);
  2351. if (ret)
  2352. return ret;
  2353. /* Wait on any GPU rendering and flushing to occur. */
  2354. ret = i915_gem_object_wait_rendering(obj);
  2355. if (ret != 0)
  2356. return ret;
  2357. i915_gem_object_flush_gtt_write_domain(obj);
  2358. /* If we have a partially-valid cache of the object in the CPU,
  2359. * finish invalidating it and free the per-page flags.
  2360. */
  2361. i915_gem_object_set_to_full_cpu_read_domain(obj);
  2362. old_write_domain = obj->write_domain;
  2363. old_read_domains = obj->read_domains;
  2364. /* Flush the CPU cache if it's still invalid. */
  2365. if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
  2366. i915_gem_clflush_object(obj);
  2367. obj->read_domains |= I915_GEM_DOMAIN_CPU;
  2368. }
  2369. /* It should now be out of any other write domains, and we can update
  2370. * the domain values for our changes.
  2371. */
  2372. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  2373. /* If we're writing through the CPU, then the GPU read domains will
  2374. * need to be invalidated at next use.
  2375. */
  2376. if (write) {
  2377. obj->read_domains &= I915_GEM_DOMAIN_CPU;
  2378. obj->write_domain = I915_GEM_DOMAIN_CPU;
  2379. }
  2380. trace_i915_gem_object_change_domain(obj,
  2381. old_read_domains,
  2382. old_write_domain);
  2383. return 0;
  2384. }
  2385. /*
  2386. * Set the next domain for the specified object. This
2387. * may not actually perform the necessary flushing/invalidating though,
  2388. * as that may want to be batched with other set_domain operations
  2389. *
  2390. * This is (we hope) the only really tricky part of gem. The goal
  2391. * is fairly simple -- track which caches hold bits of the object
  2392. * and make sure they remain coherent. A few concrete examples may
  2393. * help to explain how it works. For shorthand, we use the notation
2394. * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
  2395. * a pair of read and write domain masks.
  2396. *
  2397. * Case 1: the batch buffer
  2398. *
  2399. * 1. Allocated
  2400. * 2. Written by CPU
  2401. * 3. Mapped to GTT
  2402. * 4. Read by GPU
  2403. * 5. Unmapped from GTT
  2404. * 6. Freed
  2405. *
  2406. * Let's take these a step at a time
  2407. *
  2408. * 1. Allocated
  2409. * Pages allocated from the kernel may still have
  2410. * cache contents, so we set them to (CPU, CPU) always.
  2411. * 2. Written by CPU (using pwrite)
  2412. * The pwrite function calls set_domain (CPU, CPU) and
  2413. * this function does nothing (as nothing changes)
  2414. * 3. Mapped by GTT
  2415. * This function asserts that the object is not
  2416. * currently in any GPU-based read or write domains
  2417. * 4. Read by GPU
  2418. * i915_gem_execbuffer calls set_domain (COMMAND, 0).
  2419. * As write_domain is zero, this function adds in the
  2420. * current read domains (CPU+COMMAND, 0).
  2421. * flush_domains is set to CPU.
  2422. * invalidate_domains is set to COMMAND
  2423. * clflush is run to get data out of the CPU caches
  2424. * then i915_dev_set_domain calls i915_gem_flush to
  2425. * emit an MI_FLUSH and drm_agp_chipset_flush
  2426. * 5. Unmapped from GTT
  2427. * i915_gem_object_unbind calls set_domain (CPU, CPU)
  2428. * flush_domains and invalidate_domains end up both zero
  2429. * so no flushing/invalidating happens
  2430. * 6. Freed
  2431. * yay, done
  2432. *
  2433. * Case 2: The shared render buffer
  2434. *
  2435. * 1. Allocated
  2436. * 2. Mapped to GTT
  2437. * 3. Read/written by GPU
  2438. * 4. set_domain to (CPU,CPU)
  2439. * 5. Read/written by CPU
  2440. * 6. Read/written by GPU
  2441. *
  2442. * 1. Allocated
  2443. * Same as last example, (CPU, CPU)
  2444. * 2. Mapped to GTT
  2445. * Nothing changes (assertions find that it is not in the GPU)
  2446. * 3. Read/written by GPU
  2447. * execbuffer calls set_domain (RENDER, RENDER)
  2448. * flush_domains gets CPU
  2449. * invalidate_domains gets GPU
  2450. * clflush (obj)
  2451. * MI_FLUSH and drm_agp_chipset_flush
  2452. * 4. set_domain (CPU, CPU)
  2453. * flush_domains gets GPU
  2454. * invalidate_domains gets CPU
  2455. * wait_rendering (obj) to make sure all drawing is complete.
  2456. * This will include an MI_FLUSH to get the data from GPU
  2457. * to memory
  2458. * clflush (obj) to invalidate the CPU cache
  2459. * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
  2460. * 5. Read/written by CPU
  2461. * cache lines are loaded and dirtied
2462. * 6. Read/written by GPU
  2463. * Same as last GPU access
  2464. *
  2465. * Case 3: The constant buffer
  2466. *
  2467. * 1. Allocated
  2468. * 2. Written by CPU
  2469. * 3. Read by GPU
  2470. * 4. Updated (written) by CPU again
  2471. * 5. Read by GPU
  2472. *
  2473. * 1. Allocated
  2474. * (CPU, CPU)
  2475. * 2. Written by CPU
  2476. * (CPU, CPU)
  2477. * 3. Read by GPU
  2478. * (CPU+RENDER, 0)
  2479. * flush_domains = CPU
  2480. * invalidate_domains = RENDER
  2481. * clflush (obj)
  2482. * MI_FLUSH
  2483. * drm_agp_chipset_flush
  2484. * 4. Updated (written) by CPU again
  2485. * (CPU, CPU)
  2486. * flush_domains = 0 (no previous write domain)
  2487. * invalidate_domains = 0 (no new read domains)
  2488. * 5. Read by GPU
  2489. * (CPU+RENDER, 0)
  2490. * flush_domains = CPU
  2491. * invalidate_domains = RENDER
  2492. * clflush (obj)
  2493. * MI_FLUSH
  2494. * drm_agp_chipset_flush
  2495. */
  2496. static void
  2497. i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
  2498. {
  2499. struct drm_device *dev = obj->dev;
  2500. drm_i915_private_t *dev_priv = dev->dev_private;
  2501. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  2502. uint32_t invalidate_domains = 0;
  2503. uint32_t flush_domains = 0;
  2504. uint32_t old_read_domains;
  2505. BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
  2506. BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
  2507. intel_mark_busy(dev, obj);
  2508. #if WATCH_BUF
  2509. DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
  2510. __func__, obj,
  2511. obj->read_domains, obj->pending_read_domains,
  2512. obj->write_domain, obj->pending_write_domain);
  2513. #endif
  2514. /*
  2515. * If the object isn't moving to a new write domain,
  2516. * let the object stay in multiple read domains
  2517. */
  2518. if (obj->pending_write_domain == 0)
  2519. obj->pending_read_domains |= obj->read_domains;
  2520. else
  2521. obj_priv->dirty = 1;
  2522. /*
  2523. * Flush the current write domain if
  2524. * the new read domains don't match. Invalidate
  2525. * any read domains which differ from the old
  2526. * write domain
  2527. */
  2528. if (obj->write_domain &&
  2529. obj->write_domain != obj->pending_read_domains) {
  2530. flush_domains |= obj->write_domain;
  2531. invalidate_domains |=
  2532. obj->pending_read_domains & ~obj->write_domain;
  2533. }
  2534. /*
  2535. * Invalidate any read caches which may have
  2536. * stale data. That is, any new read domains.
  2537. */
  2538. invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
  2539. if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
  2540. #if WATCH_BUF
  2541. DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
  2542. __func__, flush_domains, invalidate_domains);
  2543. #endif
  2544. i915_gem_clflush_object(obj);
  2545. }
  2546. old_read_domains = obj->read_domains;
  2547. /* The actual obj->write_domain will be updated with
  2548. * pending_write_domain after we emit the accumulated flush for all
  2549. * of our domain changes in execbuffers (which clears objects'
  2550. * write_domains). So if we have a current write domain that we
  2551. * aren't changing, set pending_write_domain to that.
  2552. */
  2553. if (flush_domains == 0 && obj->pending_write_domain == 0)
  2554. obj->pending_write_domain = obj->write_domain;
  2555. obj->read_domains = obj->pending_read_domains;
  2556. if (flush_domains & I915_GEM_GPU_DOMAINS) {
  2557. if (obj_priv->ring == &dev_priv->render_ring)
  2558. dev_priv->flush_rings |= FLUSH_RENDER_RING;
  2559. else if (obj_priv->ring == &dev_priv->bsd_ring)
  2560. dev_priv->flush_rings |= FLUSH_BSD_RING;
  2561. }
  2562. dev->invalidate_domains |= invalidate_domains;
  2563. dev->flush_domains |= flush_domains;
  2564. #if WATCH_BUF
  2565. DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
  2566. __func__,
  2567. obj->read_domains, obj->write_domain,
  2568. dev->invalidate_domains, dev->flush_domains);
  2569. #endif
  2570. trace_i915_gem_object_change_domain(obj,
  2571. old_read_domains,
  2572. obj->write_domain);
  2573. }
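/*
 * Illustrative sketch (not part of the driver): the flush/invalidate
 * bookkeeping done by i915_gem_object_set_to_gpu_domain() above, worked
 * for Case 3 step 3 of the comment block (a CPU-written constant buffer
 * about to be read by the render engine).  All values are hypothetical.
 *
 *	uint32_t read_domains  = I915_GEM_DOMAIN_CPU;	 // current state
 *	uint32_t write_domain  = I915_GEM_DOMAIN_CPU;	 // current state
 *	uint32_t pending_read  = I915_GEM_DOMAIN_RENDER; // from the reloc
 *	uint32_t pending_write = 0;			 // GPU only reads
 *
 *	// no new write domain, so the old read domains are kept as well
 *	pending_read |= read_domains;			 // CPU | RENDER
 *	// old write domain != new read domains: flush it, and invalidate
 *	// every new reader that is not the old writer
 *	uint32_t flush = write_domain;			 // CPU
 *	uint32_t invalidate = pending_read & ~write_domain; // RENDER
 *	// plus any read domains the object did not have before
 *	invalidate |= pending_read & ~read_domains;	 // still RENDER
 *
 * Because flush contains I915_GEM_DOMAIN_CPU the object is clflushed
 * here; the accumulated dev->flush_domains/invalidate_domains are turned
 * into a single MI_FLUSH by i915_gem_flush() later in execbuffer.
 */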
  2574. /**
 * Moves the object from a partially valid CPU read domain to a fully valid one.
  2576. *
  2577. * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
  2578. * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  2579. */
  2580. static void
  2581. i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
  2582. {
  2583. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  2584. if (!obj_priv->page_cpu_valid)
  2585. return;
  2586. /* If we're partially in the CPU read domain, finish moving it in.
  2587. */
  2588. if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
  2589. int i;
  2590. for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
  2591. if (obj_priv->page_cpu_valid[i])
  2592. continue;
  2593. drm_clflush_pages(obj_priv->pages + i, 1);
  2594. }
  2595. }
  2596. /* Free the page_cpu_valid mappings which are now stale, whether
  2597. * or not we've got I915_GEM_DOMAIN_CPU.
  2598. */
  2599. kfree(obj_priv->page_cpu_valid);
  2600. obj_priv->page_cpu_valid = NULL;
  2601. }
  2602. /**
  2603. * Set the CPU read domain on a range of the object.
  2604. *
  2605. * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
  2606. * not entirely valid. The page_cpu_valid member of the object flags which
  2607. * pages have been flushed, and will be respected by
  2608. * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
  2609. * of the whole object.
  2610. *
  2611. * This function returns when the move is complete, including waiting on
  2612. * flushes to occur.
  2613. */
  2614. static int
  2615. i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
  2616. uint64_t offset, uint64_t size)
  2617. {
  2618. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  2619. uint32_t old_read_domains;
  2620. int i, ret;
  2621. if (offset == 0 && size == obj->size)
  2622. return i915_gem_object_set_to_cpu_domain(obj, 0);
  2623. ret = i915_gem_object_flush_gpu_write_domain(obj);
  2624. if (ret)
  2625. return ret;
  2626. /* Wait on any GPU rendering and flushing to occur. */
  2627. ret = i915_gem_object_wait_rendering(obj);
  2628. if (ret != 0)
  2629. return ret;
  2630. i915_gem_object_flush_gtt_write_domain(obj);
  2631. /* If we're already fully in the CPU read domain, we're done. */
  2632. if (obj_priv->page_cpu_valid == NULL &&
  2633. (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
  2634. return 0;
  2635. /* Otherwise, create/clear the per-page CPU read domain flag if we're
  2636. * newly adding I915_GEM_DOMAIN_CPU
  2637. */
  2638. if (obj_priv->page_cpu_valid == NULL) {
  2639. obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
  2640. GFP_KERNEL);
  2641. if (obj_priv->page_cpu_valid == NULL)
  2642. return -ENOMEM;
  2643. } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
  2644. memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
  2645. /* Flush the cache on any pages that are still invalid from the CPU's
  2646. * perspective.
  2647. */
  2648. for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
  2649. i++) {
  2650. if (obj_priv->page_cpu_valid[i])
  2651. continue;
  2652. drm_clflush_pages(obj_priv->pages + i, 1);
  2653. obj_priv->page_cpu_valid[i] = 1;
  2654. }
  2655. /* It should now be out of any other write domains, and we can update
  2656. * the domain values for our changes.
  2657. */
  2658. BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  2659. old_read_domains = obj->read_domains;
  2660. obj->read_domains |= I915_GEM_DOMAIN_CPU;
  2661. trace_i915_gem_object_change_domain(obj,
  2662. old_read_domains,
  2663. obj->write_domain);
  2664. return 0;
  2665. }
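/*
 * Illustrative sketch (not part of the driver): the per-page walk above
 * for a hypothetical pread of 100 bytes at offset 8000 into an object
 * with 4096-byte pages.
 *
 *	first page = offset / PAGE_SIZE              = 8000 / 4096 = 1
 *	last page  = (offset + size - 1) / PAGE_SIZE = 8099 / 4096 = 1
 *
 * so only page 1 is clflushed and marked in page_cpu_valid[].  A later
 * call covering the same page skips the flush, and a subsequent full
 * i915_gem_object_set_to_full_cpu_read_domain() only has to clflush the
 * pages that are still marked invalid.
 */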
  2666. /**
  2667. * Pin an object to the GTT and evaluate the relocations landing in it.
  2668. */
  2669. static int
  2670. i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  2671. struct drm_file *file_priv,
  2672. struct drm_i915_gem_exec_object2 *entry,
  2673. struct drm_i915_gem_relocation_entry *relocs)
  2674. {
  2675. struct drm_device *dev = obj->dev;
  2676. drm_i915_private_t *dev_priv = dev->dev_private;
  2677. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  2678. int i, ret;
  2679. void __iomem *reloc_page;
  2680. bool need_fence;
  2681. need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
  2682. obj_priv->tiling_mode != I915_TILING_NONE;
  2683. /* Check fence reg constraints and rebind if necessary */
  2684. if (need_fence &&
  2685. !i915_gem_object_fence_offset_ok(obj,
  2686. obj_priv->tiling_mode)) {
  2687. ret = i915_gem_object_unbind(obj);
  2688. if (ret)
  2689. return ret;
  2690. }
  2691. /* Choose the GTT offset for our buffer and put it there. */
  2692. ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
  2693. if (ret)
  2694. return ret;
  2695. /*
  2696. * Pre-965 chips need a fence register set up in order to
  2697. * properly handle blits to/from tiled surfaces.
  2698. */
  2699. if (need_fence) {
  2700. ret = i915_gem_object_get_fence_reg(obj);
  2701. if (ret != 0) {
  2702. i915_gem_object_unpin(obj);
  2703. return ret;
  2704. }
  2705. }
  2706. entry->offset = obj_priv->gtt_offset;
  2707. /* Apply the relocations, using the GTT aperture to avoid cache
  2708. * flushing requirements.
  2709. */
  2710. for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
  2712. struct drm_gem_object *target_obj;
  2713. struct drm_i915_gem_object *target_obj_priv;
  2714. uint32_t reloc_val, reloc_offset;
  2715. uint32_t __iomem *reloc_entry;
  2716. target_obj = drm_gem_object_lookup(obj->dev, file_priv,
  2717. reloc->target_handle);
  2718. if (target_obj == NULL) {
  2719. i915_gem_object_unpin(obj);
  2720. return -ENOENT;
  2721. }
  2722. target_obj_priv = to_intel_bo(target_obj);
  2723. #if WATCH_RELOC
  2724. DRM_INFO("%s: obj %p offset %08x target %d "
  2725. "read %08x write %08x gtt %08x "
  2726. "presumed %08x delta %08x\n",
  2727. __func__,
  2728. obj,
  2729. (int) reloc->offset,
  2730. (int) reloc->target_handle,
  2731. (int) reloc->read_domains,
  2732. (int) reloc->write_domain,
  2733. (int) target_obj_priv->gtt_offset,
  2734. (int) reloc->presumed_offset,
  2735. reloc->delta);
  2736. #endif
  2737. /* The target buffer should have appeared before us in the
  2738. * exec_object list, so it should have a GTT space bound by now.
  2739. */
  2740. if (target_obj_priv->gtt_space == NULL) {
  2741. DRM_ERROR("No GTT space found for object %d\n",
  2742. reloc->target_handle);
  2743. drm_gem_object_unreference(target_obj);
  2744. i915_gem_object_unpin(obj);
  2745. return -EINVAL;
  2746. }
		/* Validate that the target is in a valid r/w GPU domain */
		if (reloc->write_domain & (reloc->write_domain - 1)) {
			DRM_ERROR("reloc with multiple write domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->read_domains,
				  reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
  2758. if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
  2759. reloc->read_domains & I915_GEM_DOMAIN_CPU) {
  2760. DRM_ERROR("reloc with read/write CPU domains: "
  2761. "obj %p target %d offset %d "
  2762. "read %08x write %08x",
  2763. obj, reloc->target_handle,
  2764. (int) reloc->offset,
  2765. reloc->read_domains,
  2766. reloc->write_domain);
  2767. drm_gem_object_unreference(target_obj);
  2768. i915_gem_object_unpin(obj);
  2769. return -EINVAL;
  2770. }
  2771. if (reloc->write_domain && target_obj->pending_write_domain &&
  2772. reloc->write_domain != target_obj->pending_write_domain) {
  2773. DRM_ERROR("Write domain conflict: "
  2774. "obj %p target %d offset %d "
  2775. "new %08x old %08x\n",
  2776. obj, reloc->target_handle,
  2777. (int) reloc->offset,
  2778. reloc->write_domain,
  2779. target_obj->pending_write_domain);
  2780. drm_gem_object_unreference(target_obj);
  2781. i915_gem_object_unpin(obj);
  2782. return -EINVAL;
  2783. }
  2784. target_obj->pending_read_domains |= reloc->read_domains;
  2785. target_obj->pending_write_domain |= reloc->write_domain;
  2786. /* If the relocation already has the right value in it, no
  2787. * more work needs to be done.
  2788. */
  2789. if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
  2790. drm_gem_object_unreference(target_obj);
  2791. continue;
  2792. }
  2793. /* Check that the relocation address is valid... */
  2794. if (reloc->offset > obj->size - 4) {
  2795. DRM_ERROR("Relocation beyond object bounds: "
  2796. "obj %p target %d offset %d size %d.\n",
  2797. obj, reloc->target_handle,
  2798. (int) reloc->offset, (int) obj->size);
  2799. drm_gem_object_unreference(target_obj);
  2800. i915_gem_object_unpin(obj);
  2801. return -EINVAL;
  2802. }
  2803. if (reloc->offset & 3) {
  2804. DRM_ERROR("Relocation not 4-byte aligned: "
  2805. "obj %p target %d offset %d.\n",
  2806. obj, reloc->target_handle,
  2807. (int) reloc->offset);
  2808. drm_gem_object_unreference(target_obj);
  2809. i915_gem_object_unpin(obj);
  2810. return -EINVAL;
  2811. }
  2812. /* and points to somewhere within the target object. */
  2813. if (reloc->delta >= target_obj->size) {
  2814. DRM_ERROR("Relocation beyond target object bounds: "
  2815. "obj %p target %d delta %d size %d.\n",
  2816. obj, reloc->target_handle,
  2817. (int) reloc->delta, (int) target_obj->size);
  2818. drm_gem_object_unreference(target_obj);
  2819. i915_gem_object_unpin(obj);
  2820. return -EINVAL;
  2821. }
		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return ret;
		}
  2828. /* Map the page containing the relocation we're going to
  2829. * perform.
  2830. */
  2831. reloc_offset = obj_priv->gtt_offset + reloc->offset;
  2832. reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
  2833. (reloc_offset &
  2834. ~(PAGE_SIZE - 1)),
  2835. KM_USER0);
  2836. reloc_entry = (uint32_t __iomem *)(reloc_page +
  2837. (reloc_offset & (PAGE_SIZE - 1)));
  2838. reloc_val = target_obj_priv->gtt_offset + reloc->delta;
  2839. #if WATCH_BUF
  2840. DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
  2841. obj, (unsigned int) reloc->offset,
  2842. readl(reloc_entry), reloc_val);
  2843. #endif
  2844. writel(reloc_val, reloc_entry);
  2845. io_mapping_unmap_atomic(reloc_page, KM_USER0);
  2846. /* The updated presumed offset for this entry will be
  2847. * copied back out to the user.
  2848. */
  2849. reloc->presumed_offset = target_obj_priv->gtt_offset;
  2850. drm_gem_object_unreference(target_obj);
  2851. }
  2852. #if WATCH_BUF
  2853. if (0)
  2854. i915_gem_dump_object(obj, 128, __func__, ~0);
  2855. #endif
  2856. return 0;
  2857. }
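/*
 * Illustrative sketch (not part of the driver): how one relocation entry
 * is applied by the loop above, with hypothetical numbers.  Suppose the
 * batch buffer was pinned at GTT offset 0x00100000, the target object
 * (say a vertex buffer) ended up at 0x00300000, and the reloc says
 *
 *	reloc->offset          = 0x84        // where in the batch to patch
 *	reloc->delta           = 0x40        // offset inside the target
 *	reloc->presumed_offset = 0x00200000  // stale userspace guess
 *
 * presumed_offset != gtt_offset, so the patch is actually performed:
 *
 *	reloc_val = 0x00300000 + 0x40 = 0x00300040
 *
 * and that value is written through the GTT mapping at
 * 0x00100000 + 0x84.  presumed_offset is then updated to 0x00300000 and
 * copied back to userspace so the next execbuf can skip this patch.
 */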
  2858. /* Throttle our rendering by waiting until the ring has completed our requests
  2859. * emitted over 20 msec ago.
  2860. *
  2861. * Note that if we were to use the current jiffies each time around the loop,
  2862. * we wouldn't escape the function with any frames outstanding if the time to
  2863. * render a frame was over 20ms.
  2864. *
  2865. * This should get us reasonable parallelism between CPU and GPU but also
  2866. * relatively low latency when blocking on a particular request to finish.
  2867. */
  2868. static int
  2869. i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
  2870. {
  2871. struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
  2872. int ret = 0;
  2873. unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
  2874. mutex_lock(&dev->struct_mutex);
  2875. while (!list_empty(&i915_file_priv->mm.request_list)) {
  2876. struct drm_i915_gem_request *request;
  2877. request = list_first_entry(&i915_file_priv->mm.request_list,
  2878. struct drm_i915_gem_request,
  2879. client_list);
  2880. if (time_after_eq(request->emitted_jiffies, recent_enough))
  2881. break;
  2882. ret = i915_wait_request(dev, request->seqno, request->ring);
  2883. if (ret != 0)
  2884. break;
  2885. }
  2886. mutex_unlock(&dev->struct_mutex);
  2887. return ret;
  2888. }
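/*
 * Example (hypothetical): with HZ=1000, recent_enough works out to
 * jiffies - 20, so the loop above only waits for requests emitted more
 * than 20 ms ago and leaves anything newer outstanding, i.e. a client
 * can keep roughly 20 ms worth of rendering queued ahead of the GPU.
 */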
  2889. static int
  2890. i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
  2891. uint32_t buffer_count,
  2892. struct drm_i915_gem_relocation_entry **relocs)
  2893. {
  2894. uint32_t reloc_count = 0, reloc_index = 0, i;
  2895. int ret;
  2896. *relocs = NULL;
  2897. for (i = 0; i < buffer_count; i++) {
  2898. if (reloc_count + exec_list[i].relocation_count < reloc_count)
  2899. return -EINVAL;
  2900. reloc_count += exec_list[i].relocation_count;
  2901. }
  2902. *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
  2903. if (*relocs == NULL) {
  2904. DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
  2905. return -ENOMEM;
  2906. }
  2907. for (i = 0; i < buffer_count; i++) {
  2908. struct drm_i915_gem_relocation_entry __user *user_relocs;
  2909. user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
  2910. ret = copy_from_user(&(*relocs)[reloc_index],
  2911. user_relocs,
  2912. exec_list[i].relocation_count *
  2913. sizeof(**relocs));
  2914. if (ret != 0) {
  2915. drm_free_large(*relocs);
  2916. *relocs = NULL;
  2917. return -EFAULT;
  2918. }
  2919. reloc_index += exec_list[i].relocation_count;
  2920. }
  2921. return 0;
  2922. }
  2923. static int
  2924. i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
  2925. uint32_t buffer_count,
  2926. struct drm_i915_gem_relocation_entry *relocs)
  2927. {
  2928. uint32_t reloc_count = 0, i;
  2929. int ret = 0;
  2930. if (relocs == NULL)
  2931. return 0;
  2932. for (i = 0; i < buffer_count; i++) {
  2933. struct drm_i915_gem_relocation_entry __user *user_relocs;
  2934. int unwritten;
  2935. user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
  2936. unwritten = copy_to_user(user_relocs,
  2937. &relocs[reloc_count],
  2938. exec_list[i].relocation_count *
  2939. sizeof(*relocs));
  2940. if (unwritten) {
  2941. ret = -EFAULT;
  2942. goto err;
  2943. }
  2944. reloc_count += exec_list[i].relocation_count;
  2945. }
  2946. err:
  2947. drm_free_large(relocs);
  2948. return ret;
  2949. }
  2950. static int
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
			  uint64_t exec_offset)
{
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7)
		return -EINVAL;

	if (!exec_start)
		return -EINVAL;

	return 0;
}
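/*
 * Example (hypothetical): exec_offset = 0x101000, batch_start_offset =
 * 0x10 and batch_len = 0x200 give exec_start = 0x101010 and exec_len =
 * 0x200; (0x101010 | 0x200) & 0x7 == 0, so the batch passes.  A start
 * offset of 0x12 would fail the 8-byte alignment check above.
 */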
  2963. static int
  2964. i915_gem_wait_for_pending_flip(struct drm_device *dev,
  2965. struct drm_gem_object **object_list,
  2966. int count)
  2967. {
  2968. drm_i915_private_t *dev_priv = dev->dev_private;
  2969. struct drm_i915_gem_object *obj_priv;
  2970. DEFINE_WAIT(wait);
  2971. int i, ret = 0;
  2972. for (;;) {
  2973. prepare_to_wait(&dev_priv->pending_flip_queue,
  2974. &wait, TASK_INTERRUPTIBLE);
  2975. for (i = 0; i < count; i++) {
  2976. obj_priv = to_intel_bo(object_list[i]);
  2977. if (atomic_read(&obj_priv->pending_flip) > 0)
  2978. break;
  2979. }
  2980. if (i == count)
  2981. break;
  2982. if (!signal_pending(current)) {
  2983. mutex_unlock(&dev->struct_mutex);
  2984. schedule();
  2985. mutex_lock(&dev->struct_mutex);
  2986. continue;
  2987. }
  2988. ret = -ERESTARTSYS;
  2989. break;
  2990. }
  2991. finish_wait(&dev_priv->pending_flip_queue, &wait);
  2992. return ret;
  2993. }
  2994. int
  2995. i915_gem_do_execbuffer(struct drm_device *dev, void *data,
  2996. struct drm_file *file_priv,
  2997. struct drm_i915_gem_execbuffer2 *args,
  2998. struct drm_i915_gem_exec_object2 *exec_list)
  2999. {
  3000. drm_i915_private_t *dev_priv = dev->dev_private;
  3001. struct drm_gem_object **object_list = NULL;
  3002. struct drm_gem_object *batch_obj;
  3003. struct drm_i915_gem_object *obj_priv;
  3004. struct drm_clip_rect *cliprects = NULL;
  3005. struct drm_i915_gem_relocation_entry *relocs = NULL;
  3006. int ret = 0, ret2, i, pinned = 0;
  3007. uint64_t exec_offset;
  3008. uint32_t seqno, flush_domains, reloc_index;
  3009. int pin_tries, flips;
  3010. struct intel_ring_buffer *ring = NULL;
  3011. #if WATCH_EXEC
  3012. DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
  3013. (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  3014. #endif
  3015. if (args->flags & I915_EXEC_BSD) {
  3016. if (!HAS_BSD(dev)) {
  3017. DRM_ERROR("execbuf with wrong flag\n");
  3018. return -EINVAL;
  3019. }
  3020. ring = &dev_priv->bsd_ring;
  3021. } else {
  3022. ring = &dev_priv->render_ring;
  3023. }
  3024. if (args->buffer_count < 1) {
  3025. DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
  3026. return -EINVAL;
  3027. }
  3028. object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
  3029. if (object_list == NULL) {
  3030. DRM_ERROR("Failed to allocate object list for %d buffers\n",
  3031. args->buffer_count);
  3032. ret = -ENOMEM;
  3033. goto pre_mutex_err;
  3034. }
  3035. if (args->num_cliprects != 0) {
  3036. cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
  3037. GFP_KERNEL);
  3038. if (cliprects == NULL) {
  3039. ret = -ENOMEM;
  3040. goto pre_mutex_err;
  3041. }
  3042. ret = copy_from_user(cliprects,
  3043. (struct drm_clip_rect __user *)
  3044. (uintptr_t) args->cliprects_ptr,
  3045. sizeof(*cliprects) * args->num_cliprects);
  3046. if (ret != 0) {
  3047. DRM_ERROR("copy %d cliprects failed: %d\n",
  3048. args->num_cliprects, ret);
  3049. goto pre_mutex_err;
  3050. }
  3051. }
  3052. ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
  3053. &relocs);
  3054. if (ret != 0)
  3055. goto pre_mutex_err;
  3056. mutex_lock(&dev->struct_mutex);
  3057. i915_verify_inactive(dev, __FILE__, __LINE__);
  3058. if (atomic_read(&dev_priv->mm.wedged)) {
  3059. mutex_unlock(&dev->struct_mutex);
  3060. ret = -EIO;
  3061. goto pre_mutex_err;
  3062. }
  3063. if (dev_priv->mm.suspended) {
  3064. mutex_unlock(&dev->struct_mutex);
  3065. ret = -EBUSY;
  3066. goto pre_mutex_err;
  3067. }
  3068. /* Look up object handles */
  3069. flips = 0;
  3070. for (i = 0; i < args->buffer_count; i++) {
  3071. object_list[i] = drm_gem_object_lookup(dev, file_priv,
  3072. exec_list[i].handle);
  3073. if (object_list[i] == NULL) {
  3074. DRM_ERROR("Invalid object handle %d at index %d\n",
  3075. exec_list[i].handle, i);
  3076. /* prevent error path from reading uninitialized data */
  3077. args->buffer_count = i + 1;
  3078. ret = -ENOENT;
  3079. goto err;
  3080. }
  3081. obj_priv = to_intel_bo(object_list[i]);
  3082. if (obj_priv->in_execbuffer) {
  3083. DRM_ERROR("Object %p appears more than once in object list\n",
  3084. object_list[i]);
  3085. /* prevent error path from reading uninitialized data */
  3086. args->buffer_count = i + 1;
  3087. ret = -EINVAL;
  3088. goto err;
  3089. }
  3090. obj_priv->in_execbuffer = true;
  3091. flips += atomic_read(&obj_priv->pending_flip);
  3092. }
  3093. if (flips > 0) {
  3094. ret = i915_gem_wait_for_pending_flip(dev, object_list,
  3095. args->buffer_count);
  3096. if (ret)
  3097. goto err;
  3098. }
  3099. /* Pin and relocate */
  3100. for (pin_tries = 0; ; pin_tries++) {
  3101. ret = 0;
  3102. reloc_index = 0;
  3103. for (i = 0; i < args->buffer_count; i++) {
  3104. object_list[i]->pending_read_domains = 0;
  3105. object_list[i]->pending_write_domain = 0;
  3106. ret = i915_gem_object_pin_and_relocate(object_list[i],
  3107. file_priv,
  3108. &exec_list[i],
  3109. &relocs[reloc_index]);
  3110. if (ret)
  3111. break;
  3112. pinned = i + 1;
  3113. reloc_index += exec_list[i].relocation_count;
  3114. }
  3115. /* success */
  3116. if (ret == 0)
  3117. break;
  3118. /* error other than GTT full, or we've already tried again */
  3119. if (ret != -ENOSPC || pin_tries >= 1) {
  3120. if (ret != -ERESTARTSYS) {
  3121. unsigned long long total_size = 0;
  3122. int num_fences = 0;
  3123. for (i = 0; i < args->buffer_count; i++) {
  3124. obj_priv = to_intel_bo(object_list[i]);
  3125. total_size += object_list[i]->size;
  3126. num_fences +=
  3127. exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
  3128. obj_priv->tiling_mode != I915_TILING_NONE;
  3129. }
  3130. DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
  3131. pinned+1, args->buffer_count,
  3132. total_size, num_fences,
  3133. ret);
  3134. DRM_ERROR("%d objects [%d pinned], "
  3135. "%d object bytes [%d pinned], "
  3136. "%d/%d gtt bytes\n",
  3137. atomic_read(&dev->object_count),
  3138. atomic_read(&dev->pin_count),
  3139. atomic_read(&dev->object_memory),
  3140. atomic_read(&dev->pin_memory),
  3141. atomic_read(&dev->gtt_memory),
  3142. dev->gtt_total);
  3143. }
  3144. goto err;
  3145. }
  3146. /* unpin all of our buffers */
  3147. for (i = 0; i < pinned; i++)
  3148. i915_gem_object_unpin(object_list[i]);
  3149. pinned = 0;
  3150. /* evict everyone we can from the aperture */
  3151. ret = i915_gem_evict_everything(dev);
  3152. if (ret && ret != -ENOSPC)
  3153. goto err;
  3154. }
  3155. /* Set the pending read domains for the batch buffer to COMMAND */
  3156. batch_obj = object_list[args->buffer_count-1];
  3157. if (batch_obj->pending_write_domain) {
  3158. DRM_ERROR("Attempting to use self-modifying batch buffer\n");
  3159. ret = -EINVAL;
  3160. goto err;
  3161. }
  3162. batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
  3163. /* Sanity check the batch buffer, prior to moving objects */
  3164. exec_offset = exec_list[args->buffer_count - 1].offset;
	ret = i915_gem_check_execbuffer(args, exec_offset);
  3166. if (ret != 0) {
  3167. DRM_ERROR("execbuf with invalid offset/length\n");
  3168. goto err;
  3169. }
  3170. i915_verify_inactive(dev, __FILE__, __LINE__);
  3171. /* Zero the global flush/invalidate flags. These
  3172. * will be modified as new domains are computed
  3173. * for each object
  3174. */
  3175. dev->invalidate_domains = 0;
  3176. dev->flush_domains = 0;
  3177. dev_priv->flush_rings = 0;
  3178. for (i = 0; i < args->buffer_count; i++) {
  3179. struct drm_gem_object *obj = object_list[i];
  3180. /* Compute new gpu domains and update invalidate/flush */
  3181. i915_gem_object_set_to_gpu_domain(obj);
  3182. }
  3183. i915_verify_inactive(dev, __FILE__, __LINE__);
  3184. if (dev->invalidate_domains | dev->flush_domains) {
  3185. #if WATCH_EXEC
  3186. DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
  3187. __func__,
  3188. dev->invalidate_domains,
  3189. dev->flush_domains);
  3190. #endif
  3191. i915_gem_flush(dev,
  3192. dev->invalidate_domains,
  3193. dev->flush_domains);
  3194. if (dev_priv->flush_rings & FLUSH_RENDER_RING)
  3195. (void)i915_add_request(dev, file_priv,
  3196. dev->flush_domains,
  3197. &dev_priv->render_ring);
  3198. if (dev_priv->flush_rings & FLUSH_BSD_RING)
  3199. (void)i915_add_request(dev, file_priv,
  3200. dev->flush_domains,
  3201. &dev_priv->bsd_ring);
  3202. }
  3203. for (i = 0; i < args->buffer_count; i++) {
  3204. struct drm_gem_object *obj = object_list[i];
  3205. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  3206. uint32_t old_write_domain = obj->write_domain;
  3207. obj->write_domain = obj->pending_write_domain;
  3208. if (obj->write_domain)
  3209. list_move_tail(&obj_priv->gpu_write_list,
  3210. &dev_priv->mm.gpu_write_list);
  3211. else
  3212. list_del_init(&obj_priv->gpu_write_list);
  3213. trace_i915_gem_object_change_domain(obj,
  3214. obj->read_domains,
  3215. old_write_domain);
  3216. }
  3217. i915_verify_inactive(dev, __FILE__, __LINE__);
  3218. #if WATCH_COHERENCY
  3219. for (i = 0; i < args->buffer_count; i++) {
  3220. i915_gem_object_check_coherency(object_list[i],
  3221. exec_list[i].handle);
  3222. }
  3223. #endif
  3224. #if WATCH_EXEC
  3225. i915_gem_dump_object(batch_obj,
  3226. args->batch_len,
  3227. __func__,
  3228. ~0);
  3229. #endif
  3230. /* Exec the batchbuffer */
  3231. ret = ring->dispatch_gem_execbuffer(dev, ring, args,
  3232. cliprects, exec_offset);
  3233. if (ret) {
  3234. DRM_ERROR("dispatch failed %d\n", ret);
  3235. goto err;
  3236. }
  3237. /*
  3238. * Ensure that the commands in the batch buffer are
  3239. * finished before the interrupt fires
  3240. */
  3241. flush_domains = i915_retire_commands(dev, ring);
  3242. i915_verify_inactive(dev, __FILE__, __LINE__);
  3243. /*
  3244. * Get a seqno representing the execution of the current buffer,
  3245. * which we can wait on. We would like to mitigate these interrupts,
  3246. * likely by only creating seqnos occasionally (so that we have
  3247. * *some* interrupts representing completion of buffers that we can
  3248. * wait on when trying to clear up gtt space).
  3249. */
  3250. seqno = i915_add_request(dev, file_priv, flush_domains, ring);
  3251. BUG_ON(seqno == 0);
  3252. for (i = 0; i < args->buffer_count; i++) {
  3253. struct drm_gem_object *obj = object_list[i];
  3254. obj_priv = to_intel_bo(obj);
  3255. i915_gem_object_move_to_active(obj, seqno, ring);
  3256. #if WATCH_LRU
  3257. DRM_INFO("%s: move to exec list %p\n", __func__, obj);
  3258. #endif
  3259. }
  3260. #if WATCH_LRU
  3261. i915_dump_lru(dev, __func__);
  3262. #endif
  3263. i915_verify_inactive(dev, __FILE__, __LINE__);
  3264. err:
  3265. for (i = 0; i < pinned; i++)
  3266. i915_gem_object_unpin(object_list[i]);
  3267. for (i = 0; i < args->buffer_count; i++) {
  3268. if (object_list[i]) {
  3269. obj_priv = to_intel_bo(object_list[i]);
  3270. obj_priv->in_execbuffer = false;
  3271. }
  3272. drm_gem_object_unreference(object_list[i]);
  3273. }
  3274. mutex_unlock(&dev->struct_mutex);
  3275. pre_mutex_err:
  3276. /* Copy the updated relocations out regardless of current error
  3277. * state. Failure to update the relocs would mean that the next
  3278. * time userland calls execbuf, it would do so with presumed offset
  3279. * state that didn't match the actual object state.
  3280. */
  3281. ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
  3282. relocs);
  3283. if (ret2 != 0) {
  3284. DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
  3285. if (ret == 0)
  3286. ret = ret2;
  3287. }
  3288. drm_free_large(object_list);
  3289. kfree(cliprects);
  3290. return ret;
  3291. }
  3292. /*
  3293. * Legacy execbuffer just creates an exec2 list from the original exec object
  3294. * list array and passes it to the real function.
  3295. */
  3296. int
  3297. i915_gem_execbuffer(struct drm_device *dev, void *data,
  3298. struct drm_file *file_priv)
  3299. {
  3300. struct drm_i915_gem_execbuffer *args = data;
  3301. struct drm_i915_gem_execbuffer2 exec2;
  3302. struct drm_i915_gem_exec_object *exec_list = NULL;
  3303. struct drm_i915_gem_exec_object2 *exec2_list = NULL;
  3304. int ret, i;
  3305. #if WATCH_EXEC
  3306. DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
  3307. (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  3308. #endif
  3309. if (args->buffer_count < 1) {
  3310. DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
  3311. return -EINVAL;
  3312. }
  3313. /* Copy in the exec list from userland */
  3314. exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
  3315. exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
  3316. if (exec_list == NULL || exec2_list == NULL) {
  3317. DRM_ERROR("Failed to allocate exec list for %d buffers\n",
  3318. args->buffer_count);
  3319. drm_free_large(exec_list);
  3320. drm_free_large(exec2_list);
  3321. return -ENOMEM;
  3322. }
  3323. ret = copy_from_user(exec_list,
  3324. (struct drm_i915_relocation_entry __user *)
  3325. (uintptr_t) args->buffers_ptr,
  3326. sizeof(*exec_list) * args->buffer_count);
  3327. if (ret != 0) {
  3328. DRM_ERROR("copy %d exec entries failed %d\n",
  3329. args->buffer_count, ret);
  3330. drm_free_large(exec_list);
  3331. drm_free_large(exec2_list);
  3332. return -EFAULT;
  3333. }
  3334. for (i = 0; i < args->buffer_count; i++) {
  3335. exec2_list[i].handle = exec_list[i].handle;
  3336. exec2_list[i].relocation_count = exec_list[i].relocation_count;
  3337. exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
  3338. exec2_list[i].alignment = exec_list[i].alignment;
  3339. exec2_list[i].offset = exec_list[i].offset;
  3340. if (!IS_I965G(dev))
  3341. exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
  3342. else
  3343. exec2_list[i].flags = 0;
  3344. }
  3345. exec2.buffers_ptr = args->buffers_ptr;
  3346. exec2.buffer_count = args->buffer_count;
  3347. exec2.batch_start_offset = args->batch_start_offset;
  3348. exec2.batch_len = args->batch_len;
  3349. exec2.DR1 = args->DR1;
  3350. exec2.DR4 = args->DR4;
  3351. exec2.num_cliprects = args->num_cliprects;
  3352. exec2.cliprects_ptr = args->cliprects_ptr;
  3353. exec2.flags = I915_EXEC_RENDER;
  3354. ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
  3355. if (!ret) {
  3356. /* Copy the new buffer offsets back to the user's exec list. */
  3357. for (i = 0; i < args->buffer_count; i++)
  3358. exec_list[i].offset = exec2_list[i].offset;
  3359. /* ... and back out to userspace */
  3360. ret = copy_to_user((struct drm_i915_relocation_entry __user *)
  3361. (uintptr_t) args->buffers_ptr,
  3362. exec_list,
  3363. sizeof(*exec_list) * args->buffer_count);
  3364. if (ret) {
  3365. ret = -EFAULT;
  3366. DRM_ERROR("failed to copy %d exec entries "
  3367. "back to user (%d)\n",
  3368. args->buffer_count, ret);
  3369. }
  3370. }
  3371. drm_free_large(exec_list);
  3372. drm_free_large(exec2_list);
  3373. return ret;
  3374. }
  3375. int
  3376. i915_gem_execbuffer2(struct drm_device *dev, void *data,
  3377. struct drm_file *file_priv)
  3378. {
  3379. struct drm_i915_gem_execbuffer2 *args = data;
  3380. struct drm_i915_gem_exec_object2 *exec2_list = NULL;
  3381. int ret;
  3382. #if WATCH_EXEC
  3383. DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
  3384. (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  3385. #endif
  3386. if (args->buffer_count < 1) {
  3387. DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
  3388. return -EINVAL;
  3389. }
  3390. exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
  3391. if (exec2_list == NULL) {
  3392. DRM_ERROR("Failed to allocate exec list for %d buffers\n",
  3393. args->buffer_count);
  3394. return -ENOMEM;
  3395. }
  3396. ret = copy_from_user(exec2_list,
  3397. (struct drm_i915_relocation_entry __user *)
  3398. (uintptr_t) args->buffers_ptr,
  3399. sizeof(*exec2_list) * args->buffer_count);
  3400. if (ret != 0) {
  3401. DRM_ERROR("copy %d exec entries failed %d\n",
  3402. args->buffer_count, ret);
  3403. drm_free_large(exec2_list);
  3404. return -EFAULT;
  3405. }
  3406. ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
  3407. if (!ret) {
  3408. /* Copy the new buffer offsets back to the user's exec list. */
  3409. ret = copy_to_user((struct drm_i915_relocation_entry __user *)
  3410. (uintptr_t) args->buffers_ptr,
  3411. exec2_list,
  3412. sizeof(*exec2_list) * args->buffer_count);
  3413. if (ret) {
  3414. ret = -EFAULT;
  3415. DRM_ERROR("failed to copy %d exec entries "
  3416. "back to user (%d)\n",
  3417. args->buffer_count, ret);
  3418. }
  3419. }
  3420. drm_free_large(exec2_list);
  3421. return ret;
  3422. }
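/*
 * Illustrative sketch (not part of the driver): a minimal userspace call
 * into the execbuffer2 path above, assuming libdrm-style ioctl plumbing
 * and already-created bo handles plus a relocation array; the names
 * vbo_handle, batch_handle, relocs and batch_bytes are hypothetical.
 *
 *	struct drm_i915_gem_exec_object2 exec[2] = {
 *		{ .handle = vbo_handle, .relocation_count = 0 },
 *		{ .handle = batch_handle,
 *		  .relocation_count = nrelocs,
 *		  .relocs_ptr = (uintptr_t)relocs },	// batch goes last
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)exec,
 *		.buffer_count = 2,
 *		.batch_len = batch_bytes,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * On success the kernel copies the final GTT offsets back into exec[],
 * which userspace should remember as the next presumed_offset values.
 */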
  3423. int
  3424. i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
  3425. {
  3426. struct drm_device *dev = obj->dev;
  3427. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  3428. int ret;
  3429. BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
  3430. i915_verify_inactive(dev, __FILE__, __LINE__);
  3431. if (obj_priv->gtt_space != NULL) {
  3432. if (alignment == 0)
  3433. alignment = i915_gem_get_gtt_alignment(obj);
  3434. if (obj_priv->gtt_offset & (alignment - 1)) {
  3435. WARN(obj_priv->pin_count,
  3436. "bo is already pinned with incorrect alignment:"
  3437. " offset=%x, req.alignment=%x\n",
  3438. obj_priv->gtt_offset, alignment);
  3439. ret = i915_gem_object_unbind(obj);
  3440. if (ret)
  3441. return ret;
  3442. }
  3443. }
  3444. if (obj_priv->gtt_space == NULL) {
  3445. ret = i915_gem_object_bind_to_gtt(obj, alignment);
  3446. if (ret)
  3447. return ret;
  3448. }
  3449. obj_priv->pin_count++;
  3450. /* If the object is not active and not pending a flush,
  3451. * remove it from the inactive list
  3452. */
  3453. if (obj_priv->pin_count == 1) {
  3454. atomic_inc(&dev->pin_count);
  3455. atomic_add(obj->size, &dev->pin_memory);
  3456. if (!obj_priv->active &&
  3457. (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
  3458. list_del_init(&obj_priv->list);
  3459. }
  3460. i915_verify_inactive(dev, __FILE__, __LINE__);
  3461. return 0;
  3462. }
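/*
 * Example (hypothetical): an object already bound at gtt_offset 0x21000
 * that is pinned with alignment 0x10000: 0x21000 & 0xffff = 0x1000, so
 * the offset is misaligned and the object is unbound and rebound at a
 * suitable offset before pin_count is raised.  The check above assumes
 * power-of-two alignments.
 */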
  3463. void
  3464. i915_gem_object_unpin(struct drm_gem_object *obj)
  3465. {
  3466. struct drm_device *dev = obj->dev;
  3467. drm_i915_private_t *dev_priv = dev->dev_private;
  3468. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  3469. i915_verify_inactive(dev, __FILE__, __LINE__);
  3470. obj_priv->pin_count--;
  3471. BUG_ON(obj_priv->pin_count < 0);
  3472. BUG_ON(obj_priv->gtt_space == NULL);
  3473. /* If the object is no longer pinned, and is
  3474. * neither active nor being flushed, then stick it on
  3475. * the inactive list
  3476. */
  3477. if (obj_priv->pin_count == 0) {
  3478. if (!obj_priv->active &&
  3479. (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
  3480. list_move_tail(&obj_priv->list,
  3481. &dev_priv->mm.inactive_list);
  3482. atomic_dec(&dev->pin_count);
  3483. atomic_sub(obj->size, &dev->pin_memory);
  3484. }
  3485. i915_verify_inactive(dev, __FILE__, __LINE__);
  3486. }
  3487. int
  3488. i915_gem_pin_ioctl(struct drm_device *dev, void *data,
  3489. struct drm_file *file_priv)
  3490. {
  3491. struct drm_i915_gem_pin *args = data;
  3492. struct drm_gem_object *obj;
  3493. struct drm_i915_gem_object *obj_priv;
  3494. int ret;
  3495. mutex_lock(&dev->struct_mutex);
  3496. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3497. if (obj == NULL) {
  3498. DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
  3499. args->handle);
  3500. mutex_unlock(&dev->struct_mutex);
  3501. return -ENOENT;
  3502. }
  3503. obj_priv = to_intel_bo(obj);
  3504. if (obj_priv->madv != I915_MADV_WILLNEED) {
  3505. DRM_ERROR("Attempting to pin a purgeable buffer\n");
  3506. drm_gem_object_unreference(obj);
  3507. mutex_unlock(&dev->struct_mutex);
  3508. return -EINVAL;
  3509. }
  3510. if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
  3511. DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
  3512. args->handle);
  3513. drm_gem_object_unreference(obj);
  3514. mutex_unlock(&dev->struct_mutex);
  3515. return -EINVAL;
  3516. }
  3517. obj_priv->user_pin_count++;
  3518. obj_priv->pin_filp = file_priv;
  3519. if (obj_priv->user_pin_count == 1) {
  3520. ret = i915_gem_object_pin(obj, args->alignment);
  3521. if (ret != 0) {
  3522. drm_gem_object_unreference(obj);
  3523. mutex_unlock(&dev->struct_mutex);
  3524. return ret;
  3525. }
  3526. }
  3527. /* XXX - flush the CPU caches for pinned objects
  3528. * as the X server doesn't manage domains yet
  3529. */
  3530. i915_gem_object_flush_cpu_write_domain(obj);
  3531. args->offset = obj_priv->gtt_offset;
  3532. drm_gem_object_unreference(obj);
  3533. mutex_unlock(&dev->struct_mutex);
  3534. return 0;
  3535. }
  3536. int
  3537. i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
  3538. struct drm_file *file_priv)
  3539. {
  3540. struct drm_i915_gem_pin *args = data;
  3541. struct drm_gem_object *obj;
  3542. struct drm_i915_gem_object *obj_priv;
  3543. mutex_lock(&dev->struct_mutex);
  3544. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3545. if (obj == NULL) {
  3546. DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
  3547. args->handle);
  3548. mutex_unlock(&dev->struct_mutex);
  3549. return -ENOENT;
  3550. }
  3551. obj_priv = to_intel_bo(obj);
  3552. if (obj_priv->pin_filp != file_priv) {
  3553. DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
  3554. args->handle);
  3555. drm_gem_object_unreference(obj);
  3556. mutex_unlock(&dev->struct_mutex);
  3557. return -EINVAL;
  3558. }
  3559. obj_priv->user_pin_count--;
  3560. if (obj_priv->user_pin_count == 0) {
  3561. obj_priv->pin_filp = NULL;
  3562. i915_gem_object_unpin(obj);
  3563. }
  3564. drm_gem_object_unreference(obj);
  3565. mutex_unlock(&dev->struct_mutex);
  3566. return 0;
  3567. }
  3568. int
  3569. i915_gem_busy_ioctl(struct drm_device *dev, void *data,
  3570. struct drm_file *file_priv)
  3571. {
  3572. struct drm_i915_gem_busy *args = data;
  3573. struct drm_gem_object *obj;
  3574. struct drm_i915_gem_object *obj_priv;
  3575. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3576. if (obj == NULL) {
  3577. DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
  3578. args->handle);
  3579. return -ENOENT;
  3580. }
  3581. mutex_lock(&dev->struct_mutex);
  3582. /* Count all active objects as busy, even if they are currently not used
  3583. * by the gpu. Users of this interface expect objects to eventually
  3584. * become non-busy without any further actions, therefore emit any
  3585. * necessary flushes here.
  3586. */
  3587. obj_priv = to_intel_bo(obj);
  3588. args->busy = obj_priv->active;
  3589. if (args->busy) {
  3590. /* Unconditionally flush objects, even when the gpu still uses this
  3591. * object. Userspace calling this function indicates that it wants to
  3592. * use this buffer rather sooner than later, so issuing the required
  3593. * flush earlier is beneficial.
  3594. */
  3595. if (obj->write_domain) {
  3596. i915_gem_flush(dev, 0, obj->write_domain);
  3597. (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
  3598. }
  3599. /* Update the active list for the hardware's current position.
  3600. * Otherwise this only updates on a delayed timer or when irqs
  3601. * are actually unmasked, and our working set ends up being
  3602. * larger than required.
  3603. */
  3604. i915_gem_retire_requests_ring(dev, obj_priv->ring);
  3605. args->busy = obj_priv->active;
  3606. }
  3607. drm_gem_object_unreference(obj);
  3608. mutex_unlock(&dev->struct_mutex);
  3609. return 0;
  3610. }
  3611. int
  3612. i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
  3613. struct drm_file *file_priv)
  3614. {
  3615. return i915_gem_ring_throttle(dev, file_priv);
  3616. }
  3617. int
  3618. i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
  3619. struct drm_file *file_priv)
  3620. {
  3621. struct drm_i915_gem_madvise *args = data;
  3622. struct drm_gem_object *obj;
  3623. struct drm_i915_gem_object *obj_priv;
  3624. switch (args->madv) {
  3625. case I915_MADV_DONTNEED:
  3626. case I915_MADV_WILLNEED:
  3627. break;
  3628. default:
  3629. return -EINVAL;
  3630. }
  3631. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  3632. if (obj == NULL) {
  3633. DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
  3634. args->handle);
  3635. return -ENOENT;
  3636. }
  3637. mutex_lock(&dev->struct_mutex);
  3638. obj_priv = to_intel_bo(obj);
  3639. if (obj_priv->pin_count) {
  3640. drm_gem_object_unreference(obj);
  3641. mutex_unlock(&dev->struct_mutex);
  3642. DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
  3643. return -EINVAL;
  3644. }
  3645. if (obj_priv->madv != __I915_MADV_PURGED)
  3646. obj_priv->madv = args->madv;
  3647. /* if the object is no longer bound, discard its backing storage */
  3648. if (i915_gem_object_is_purgeable(obj_priv) &&
  3649. obj_priv->gtt_space == NULL)
  3650. i915_gem_object_truncate(obj);
  3651. args->retained = obj_priv->madv != __I915_MADV_PURGED;
  3652. drm_gem_object_unreference(obj);
  3653. mutex_unlock(&dev->struct_mutex);
  3654. return 0;
  3655. }
  3656. struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
  3657. size_t size)
  3658. {
  3659. struct drm_i915_gem_object *obj;
  3660. obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  3661. if (obj == NULL)
  3662. return NULL;
  3663. if (drm_gem_object_init(dev, &obj->base, size) != 0) {
  3664. kfree(obj);
  3665. return NULL;
  3666. }
  3667. obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  3668. obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  3669. obj->agp_type = AGP_USER_MEMORY;
  3670. obj->base.driver_private = NULL;
  3671. obj->fence_reg = I915_FENCE_REG_NONE;
  3672. INIT_LIST_HEAD(&obj->list);
  3673. INIT_LIST_HEAD(&obj->gpu_write_list);
  3674. obj->madv = I915_MADV_WILLNEED;
  3675. trace_i915_gem_object_create(&obj->base);
  3676. return &obj->base;
  3677. }
  3678. int i915_gem_init_object(struct drm_gem_object *obj)
  3679. {
  3680. BUG();
  3681. return 0;
  3682. }
  3683. static void i915_gem_free_object_tail(struct drm_gem_object *obj)
  3684. {
  3685. struct drm_device *dev = obj->dev;
  3686. drm_i915_private_t *dev_priv = dev->dev_private;
  3687. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  3688. int ret;
  3689. ret = i915_gem_object_unbind(obj);
  3690. if (ret == -ERESTARTSYS) {
  3691. list_move(&obj_priv->list,
  3692. &dev_priv->mm.deferred_free_list);
  3693. return;
  3694. }
  3695. if (obj_priv->mmap_offset)
  3696. i915_gem_free_mmap_offset(obj);
  3697. drm_gem_object_release(obj);
  3698. kfree(obj_priv->page_cpu_valid);
  3699. kfree(obj_priv->bit_17);
  3700. kfree(obj_priv);
  3701. }
  3702. void i915_gem_free_object(struct drm_gem_object *obj)
  3703. {
  3704. struct drm_device *dev = obj->dev;
  3705. struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  3706. trace_i915_gem_object_destroy(obj);
  3707. while (obj_priv->pin_count > 0)
  3708. i915_gem_object_unpin(obj);
  3709. if (obj_priv->phys_obj)
  3710. i915_gem_detach_phys_object(dev, obj);
  3711. i915_gem_free_object_tail(obj);
  3712. }
  3713. int
  3714. i915_gem_idle(struct drm_device *dev)
  3715. {
  3716. drm_i915_private_t *dev_priv = dev->dev_private;
  3717. int ret;
  3718. mutex_lock(&dev->struct_mutex);
  3719. if (dev_priv->mm.suspended ||
  3720. (dev_priv->render_ring.gem_object == NULL) ||
  3721. (HAS_BSD(dev) &&
  3722. dev_priv->bsd_ring.gem_object == NULL)) {
  3723. mutex_unlock(&dev->struct_mutex);
  3724. return 0;
  3725. }
  3726. ret = i915_gpu_idle(dev);
  3727. if (ret) {
  3728. mutex_unlock(&dev->struct_mutex);
  3729. return ret;
  3730. }
  3731. /* Under UMS, be paranoid and evict. */
  3732. if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
  3733. ret = i915_gem_evict_inactive(dev);
  3734. if (ret) {
  3735. mutex_unlock(&dev->struct_mutex);
  3736. return ret;
  3737. }
  3738. }
  3739. /* Hack! Don't let anybody do execbuf while we don't control the chip.
  3740. * We need to replace this with a semaphore, or something.
  3741. * And not confound mm.suspended!
  3742. */
  3743. dev_priv->mm.suspended = 1;
  3744. del_timer(&dev_priv->hangcheck_timer);
  3745. i915_kernel_lost_context(dev);
  3746. i915_gem_cleanup_ringbuffer(dev);
  3747. mutex_unlock(&dev->struct_mutex);
  3748. /* Cancel the retire work handler, which should be idle now. */
  3749. cancel_delayed_work_sync(&dev_priv->mm.retire_work);
  3750. return 0;
  3751. }
  3752. /*
  3753. * 965+ support PIPE_CONTROL commands, which provide finer grained control
  3754. * over cache flushing.
  3755. */
  3756. static int
  3757. i915_gem_init_pipe_control(struct drm_device *dev)
  3758. {
  3759. drm_i915_private_t *dev_priv = dev->dev_private;
  3760. struct drm_gem_object *obj;
  3761. struct drm_i915_gem_object *obj_priv;
  3762. int ret;
  3763. obj = i915_gem_alloc_object(dev, 4096);
  3764. if (obj == NULL) {
  3765. DRM_ERROR("Failed to allocate seqno page\n");
  3766. ret = -ENOMEM;
  3767. goto err;
  3768. }
  3769. obj_priv = to_intel_bo(obj);
  3770. obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
  3771. ret = i915_gem_object_pin(obj, 4096);
  3772. if (ret)
  3773. goto err_unref;
  3774. dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
  3775. dev_priv->seqno_page = kmap(obj_priv->pages[0]);
  3776. if (dev_priv->seqno_page == NULL)
  3777. goto err_unpin;
  3778. dev_priv->seqno_obj = obj;
  3779. memset(dev_priv->seqno_page, 0, PAGE_SIZE);
  3780. return 0;
  3781. err_unpin:
  3782. i915_gem_object_unpin(obj);
  3783. err_unref:
  3784. drm_gem_object_unreference(obj);
  3785. err:
  3786. return ret;
  3787. }
  3788. static void
  3789. i915_gem_cleanup_pipe_control(struct drm_device *dev)
  3790. {
  3791. drm_i915_private_t *dev_priv = dev->dev_private;
  3792. struct drm_gem_object *obj;
  3793. struct drm_i915_gem_object *obj_priv;
  3794. obj = dev_priv->seqno_obj;
  3795. obj_priv = to_intel_bo(obj);
  3796. kunmap(obj_priv->pages[0]);
  3797. i915_gem_object_unpin(obj);
  3798. drm_gem_object_unreference(obj);
  3799. dev_priv->seqno_obj = NULL;
  3800. dev_priv->seqno_page = NULL;
  3801. }
  3802. int
  3803. i915_gem_init_ringbuffer(struct drm_device *dev)
  3804. {
  3805. drm_i915_private_t *dev_priv = dev->dev_private;
  3806. int ret;
  3807. dev_priv->render_ring = render_ring;
  3808. if (!I915_NEED_GFX_HWS(dev)) {
  3809. dev_priv->render_ring.status_page.page_addr
  3810. = dev_priv->status_page_dmah->vaddr;
  3811. memset(dev_priv->render_ring.status_page.page_addr,
  3812. 0, PAGE_SIZE);
  3813. }
  3814. if (HAS_PIPE_CONTROL(dev)) {
  3815. ret = i915_gem_init_pipe_control(dev);
  3816. if (ret)
  3817. return ret;
  3818. }
  3819. ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
  3820. if (ret)
  3821. goto cleanup_pipe_control;
  3822. if (HAS_BSD(dev)) {
  3823. dev_priv->bsd_ring = bsd_ring;
  3824. ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
  3825. if (ret)
  3826. goto cleanup_render_ring;
  3827. }
  3828. dev_priv->next_seqno = 1;
  3829. return 0;
  3830. cleanup_render_ring:
  3831. intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
  3832. cleanup_pipe_control:
  3833. if (HAS_PIPE_CONTROL(dev))
  3834. i915_gem_cleanup_pipe_control(dev);
  3835. return ret;
  3836. }
  3837. void
  3838. i915_gem_cleanup_ringbuffer(struct drm_device *dev)
  3839. {
  3840. drm_i915_private_t *dev_priv = dev->dev_private;
  3841. intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
  3842. if (HAS_BSD(dev))
  3843. intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
  3844. if (HAS_PIPE_CONTROL(dev))
  3845. i915_gem_cleanup_pipe_control(dev);
  3846. }
  3847. int
  3848. i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
  3849. struct drm_file *file_priv)
  3850. {
  3851. drm_i915_private_t *dev_priv = dev->dev_private;
  3852. int ret;
  3853. if (drm_core_check_feature(dev, DRIVER_MODESET))
  3854. return 0;
  3855. if (atomic_read(&dev_priv->mm.wedged)) {
  3856. DRM_ERROR("Reenabling wedged hardware, good luck\n");
  3857. atomic_set(&dev_priv->mm.wedged, 0);
  3858. }
  3859. mutex_lock(&dev->struct_mutex);
  3860. dev_priv->mm.suspended = 0;
  3861. ret = i915_gem_init_ringbuffer(dev);
  3862. if (ret != 0) {
  3863. mutex_unlock(&dev->struct_mutex);
  3864. return ret;
  3865. }
  3866. spin_lock(&dev_priv->mm.active_list_lock);
  3867. BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
  3868. BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
  3869. spin_unlock(&dev_priv->mm.active_list_lock);
  3870. BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
  3871. BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
  3872. BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
  3873. BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
  3874. mutex_unlock(&dev->struct_mutex);
  3875. ret = drm_irq_install(dev);
  3876. if (ret)
  3877. goto cleanup_ringbuffer;
  3878. return 0;
  3879. cleanup_ringbuffer:
  3880. mutex_lock(&dev->struct_mutex);
  3881. i915_gem_cleanup_ringbuffer(dev);
  3882. dev_priv->mm.suspended = 1;
  3883. mutex_unlock(&dev->struct_mutex);
  3884. return ret;
  3885. }
  3886. int
  3887. i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
  3888. struct drm_file *file_priv)
  3889. {
  3890. if (drm_core_check_feature(dev, DRIVER_MODESET))
  3891. return 0;
  3892. drm_irq_uninstall(dev);
  3893. return i915_gem_idle(dev);
  3894. }
  3895. void
  3896. i915_gem_lastclose(struct drm_device *dev)
  3897. {
  3898. int ret;
  3899. if (drm_core_check_feature(dev, DRIVER_MODESET))
  3900. return;
  3901. ret = i915_gem_idle(dev);
  3902. if (ret)
  3903. DRM_ERROR("failed to idle hardware: %d\n", ret);
  3904. }
  3905. void
  3906. i915_gem_load(struct drm_device *dev)
  3907. {
  3908. int i;
  3909. drm_i915_private_t *dev_priv = dev->dev_private;
  3910. spin_lock_init(&dev_priv->mm.active_list_lock);
  3911. INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
  3912. INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
  3913. INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
  3914. INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  3915. INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
  3916. INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
  3917. INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
  3918. if (HAS_BSD(dev)) {
  3919. INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
  3920. INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
  3921. }
  3922. for (i = 0; i < 16; i++)
  3923. INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
  3924. INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
  3925. i915_gem_retire_work_handler);
  3926. spin_lock(&shrink_list_lock);
  3927. list_add(&dev_priv->mm.shrink_list, &shrink_list);
  3928. spin_unlock(&shrink_list_lock);
  3929. /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
  3930. if (IS_GEN3(dev)) {
  3931. u32 tmp = I915_READ(MI_ARB_STATE);
  3932. if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
  3933. /* arb state is a masked write, so set bit + bit in mask */
  3934. tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
  3935. I915_WRITE(MI_ARB_STATE, tmp);
  3936. }
  3937. }
  3938. /* Old X drivers will take 0-2 for front, back, depth buffers */
  3939. if (!drm_core_check_feature(dev, DRIVER_MODESET))
  3940. dev_priv->fence_reg_start = 3;
  3941. if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
  3942. dev_priv->num_fence_regs = 16;
  3943. else
  3944. dev_priv->num_fence_regs = 8;
  3945. /* Initialize fence registers to zero */
  3946. if (IS_I965G(dev)) {
  3947. for (i = 0; i < 16; i++)
  3948. I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
  3949. } else {
  3950. for (i = 0; i < 8; i++)
  3951. I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
  3952. if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
  3953. for (i = 0; i < 8; i++)
  3954. I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
  3955. }
  3956. i915_gem_detect_bit_6_swizzle(dev);
  3957. init_waitqueue_head(&dev_priv->pending_flip_queue);
  3958. }
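/*
 * Note on the MI_ARB_STATE write in i915_gem_load() above: the register
 * is a masked register, where the upper half selects which bits of the
 * lower half actually change.  Writing
 *
 *	MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT)
 *
 * therefore enables that single bit while leaving the rest of the
 * arbiter state untouched, so no read-modify-write of the other bits is
 * needed beyond the initial check.
 */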
  3959. /*
  3960. * Create a physically contiguous memory object for this object
  3961. * e.g. for cursor + overlay regs
  3962. */
  3963. int i915_gem_init_phys_object(struct drm_device *dev,
  3964. int id, int size, int align)
  3965. {
  3966. drm_i915_private_t *dev_priv = dev->dev_private;
  3967. struct drm_i915_gem_phys_object *phys_obj;
  3968. int ret;
  3969. if (dev_priv->mm.phys_objs[id - 1] || !size)
  3970. return 0;
  3971. phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
  3972. if (!phys_obj)
  3973. return -ENOMEM;
  3974. phys_obj->id = id;
  3975. phys_obj->handle = drm_pci_alloc(dev, size, align);
  3976. if (!phys_obj->handle) {
  3977. ret = -ENOMEM;
  3978. goto kfree_obj;
  3979. }
  3980. #ifdef CONFIG_X86
  3981. set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  3982. #endif
  3983. dev_priv->mm.phys_objs[id - 1] = phys_obj;
  3984. return 0;
  3985. kfree_obj:
  3986. kfree(phys_obj);
  3987. return ret;
  3988. }
  3989. void i915_gem_free_phys_object(struct drm_device *dev, int id)
  3990. {
  3991. drm_i915_private_t *dev_priv = dev->dev_private;
  3992. struct drm_i915_gem_phys_object *phys_obj;
  3993. if (!dev_priv->mm.phys_objs[id - 1])
  3994. return;
  3995. phys_obj = dev_priv->mm.phys_objs[id - 1];
  3996. if (phys_obj->cur_obj) {
  3997. i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
  3998. }
  3999. #ifdef CONFIG_X86
  4000. set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  4001. #endif
  4002. drm_pci_free(dev, phys_obj->handle);
  4003. kfree(phys_obj);
  4004. dev_priv->mm.phys_objs[id - 1] = NULL;
  4005. }
  4006. void i915_gem_free_all_phys_object(struct drm_device *dev)
  4007. {
  4008. int i;
  4009. for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
  4010. i915_gem_free_phys_object(dev, i);
  4011. }
  4012. void i915_gem_detach_phys_object(struct drm_device *dev,
  4013. struct drm_gem_object *obj)
  4014. {
  4015. struct drm_i915_gem_object *obj_priv;
  4016. int i;
  4017. int ret;
  4018. int page_count;
  4019. obj_priv = to_intel_bo(obj);
  4020. if (!obj_priv->phys_obj)
  4021. return;
  4022. ret = i915_gem_object_get_pages(obj, 0);
  4023. if (ret)
  4024. goto out;
  4025. page_count = obj->size / PAGE_SIZE;
  4026. for (i = 0; i < page_count; i++) {
  4027. char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
  4028. char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  4029. memcpy(dst, src, PAGE_SIZE);
  4030. kunmap_atomic(dst, KM_USER0);
  4031. }
  4032. drm_clflush_pages(obj_priv->pages, page_count);
  4033. drm_agp_chipset_flush(dev);
  4034. i915_gem_object_put_pages(obj);
  4035. out:
  4036. obj_priv->phys_obj->cur_obj = NULL;
  4037. obj_priv->phys_obj = NULL;
  4038. }
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj,
			    int id,
			    int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = to_intel_bo(obj);

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	i915_gem_object_put_pages(obj);

	return 0;
out:
	return ret;
}
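
/*
 * pwrite path for objects backed by a phys object: copy the data straight
 * from userspace into the contiguous allocation (no GTT or shmem involved),
 * then flush the chipset so the hardware sees the new contents.
 */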
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}
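
/*
 * Illustrative only: a minimal sketch (not part of this file) of how a
 * display-side caller might use the phys object helpers above for a small,
 * physically contiguous buffer such as a cursor image. The function name,
 * the per-pipe id arithmetic and the PAGE_SIZE alignment are assumptions
 * made for this example; the real callers live in the modesetting code.
 */
#if 0
static int example_pin_cursor_bo(struct drm_device *dev,
				 struct drm_gem_object *bo, int pipe)
{
	/* Bind the object to a cursor phys object slot; the contents of the
	 * object's shmem pages are copied into the contiguous allocation. */
	return i915_gem_attach_phys_object(dev, bo,
					   I915_GEM_PHYS_CURSOR_0 + pipe,
					   PAGE_SIZE);
}
#endif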

void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list))
		list_del_init(i915_file_priv->mm.request_list.next);
	mutex_unlock(&dev->struct_mutex);
}
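
/*
 * Report whether the GPU still has outstanding work: true if the flushing
 * list or any ring's active list is non-empty.
 */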
static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->render_ring.active_list);
	if (HAS_BSD(dev))
		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);

	return !lists_empty;
}
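
/*
 * Memory shrinker callback. With nr_to_scan == 0 it only reports how many
 * inactive objects could be reclaimed (scaled by sysctl_vfs_cache_pressure);
 * otherwise it unbinds purgeable buffers first, then any remaining inactive
 * buffers, and as a last resort idles the GPU and rescans. Returns -1 when
 * every device's struct_mutex was contended and nothing could be done safely.
 */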
static int
i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

rescan:
	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);
		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				i915_gem_object_unbind(&obj_priv->base);
				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (nr_to_scan > 0) {
				i915_gem_object_unbind(&obj_priv->base);
				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;
	}

	if (nr_to_scan) {
		int active = 0;

		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This has a dramatic impact to reduce the number of
		 * OOM-killer events whilst running the GPU aggressively.
		 */
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (!mutex_trylock(&dev->struct_mutex))
				continue;

			spin_unlock(&shrink_list_lock);

			if (i915_gpu_is_active(dev)) {
				i915_gpu_idle(dev);
				active++;
			}

			spin_lock(&shrink_list_lock);
			mutex_unlock(&dev->struct_mutex);
		}

		if (active)
			goto rescan;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;
	else if (cnt > 0)
		return (cnt / 100) * sysctl_vfs_cache_pressure;
	else
		return 0;
}

static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};

__init void
i915_gem_shrinker_init(void)
{
	register_shrinker(&shrinker);
}

__exit void
i915_gem_shrinker_exit(void)
{
	unregister_shrinker(&shrinker);
}
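
/*
 * Illustrative only: i915_gem_shrinker_init()/exit() are meant to be called
 * once from the driver module's init and exit paths (hence the __init/__exit
 * annotations). The sketch below shows one way such wiring could look; the
 * i915_example_init/i915_example_exit names are hypothetical, and the real
 * module hooks live in i915_drv.c.
 */
#if 0
static int __init i915_example_init(void)
{
	i915_gem_shrinker_init();	/* register the GEM shrinker */
	return 0;
}

static void __exit i915_example_exit(void)
{
	i915_gem_shrinker_exit();	/* unregister before unload */
}

module_init(i915_example_init);
module_exit(i915_example_exit);
#endif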