amd_iommu.c

  1. /*
  2. * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
  3. * Author: Joerg Roedel <joerg.roedel@amd.com>
  4. * Leo Duran <leo.duran@amd.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published
  8. * by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/ratelimit.h>
  20. #include <linux/pci.h>
  21. #include <linux/pci-ats.h>
  22. #include <linux/bitmap.h>
  23. #include <linux/slab.h>
  24. #include <linux/debugfs.h>
  25. #include <linux/scatterlist.h>
  26. #include <linux/dma-mapping.h>
  27. #include <linux/iommu-helper.h>
  28. #include <linux/iommu.h>
  29. #include <linux/delay.h>
  30. #include <linux/amd-iommu.h>
  31. #include <linux/notifier.h>
  32. #include <linux/export.h>
  33. #include <asm/msidef.h>
  34. #include <asm/proto.h>
  35. #include <asm/iommu.h>
  36. #include <asm/gart.h>
  37. #include <asm/dma.h>
  38. #include "amd_iommu_proto.h"
  39. #include "amd_iommu_types.h"
  40. #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
  41. #define LOOP_TIMEOUT 100000
  42. /*
  43. * This bitmap is used to advertise the page sizes our hardware supports
  44. * to the IOMMU core, which will then use this information to split
  45. * physically contiguous memory regions it is mapping into page sizes
  46. * that we support.
  47. *
  48. * Traditionally the IOMMU core just handed us the mappings directly,
  49. * after making sure the size is a power-of-two multiple of 4KiB and that the
  50. * mapping has natural alignment.
  51. *
  52. * To retain this behavior, we currently advertise that we support
  53. * all page sizes that are a power-of-two multiple of 4KiB.
  54. *
  55. * If at some point we'd like to utilize the IOMMU core's new behavior,
  56. * we could change this to advertise the real page sizes we support.
  57. */
  58. #define AMD_IOMMU_PGSIZES (~0xFFFUL)
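/*
 * Illustrative sketch (not part of this driver): given the bitmap above,
 * a caller splitting a contiguous region could pick the largest
 * advertised, naturally aligned page size roughly like this. The helper
 * name is hypothetical; the IOMMU core performs an equivalent
 * computation when it splits mappings.
 */
static unsigned long example_pick_pgsize(unsigned long iova,
					 unsigned long paddr,
					 size_t size)
{
	unsigned long addr_merge = iova | paddr;
	int bit;

	/* Try sizes from the largest that fits down to 4KiB */
	for (bit = fls64(size) - 1; bit >= PAGE_SHIFT; --bit) {
		unsigned long pgsize = 1UL << bit;

		/* The size must be advertised in AMD_IOMMU_PGSIZES */
		if (!(AMD_IOMMU_PGSIZES & pgsize))
			continue;

		/* Both addresses must be naturally aligned to it */
		if (addr_merge & (pgsize - 1))
			continue;

		return pgsize;
	}

	return 0;
}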
  59. static DEFINE_RWLOCK(amd_iommu_devtable_lock);
  60. /* A list of preallocated protection domains */
  61. static LIST_HEAD(iommu_pd_list);
  62. static DEFINE_SPINLOCK(iommu_pd_list_lock);
  63. /* List of all available dev_data structures */
  64. static LIST_HEAD(dev_data_list);
  65. static DEFINE_SPINLOCK(dev_data_list_lock);
  66. /*
  67. * Domain for untranslated devices - only allocated
  68. * if iommu=pt is passed on the kernel cmd line.
  69. */
  70. static struct protection_domain *pt_domain;
  71. static struct iommu_ops amd_iommu_ops;
  72. static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
  73. int amd_iommu_max_glx_val = -1;
  74. /*
  75. * general struct to manage commands sent to an IOMMU
  76. */
  77. struct iommu_cmd {
  78. u32 data[4];
  79. };
  80. static void update_domain(struct protection_domain *domain);
  81. static int __init alloc_passthrough_domain(void);
  82. /****************************************************************************
  83. *
  84. * Helper functions
  85. *
  86. ****************************************************************************/
  87. static struct iommu_dev_data *alloc_dev_data(u16 devid)
  88. {
  89. struct iommu_dev_data *dev_data;
  90. unsigned long flags;
  91. dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
  92. if (!dev_data)
  93. return NULL;
  94. dev_data->devid = devid;
  95. atomic_set(&dev_data->bind, 0);
  96. spin_lock_irqsave(&dev_data_list_lock, flags);
  97. list_add_tail(&dev_data->dev_data_list, &dev_data_list);
  98. spin_unlock_irqrestore(&dev_data_list_lock, flags);
  99. return dev_data;
  100. }
  101. static void free_dev_data(struct iommu_dev_data *dev_data)
  102. {
  103. unsigned long flags;
  104. spin_lock_irqsave(&dev_data_list_lock, flags);
  105. list_del(&dev_data->dev_data_list);
  106. spin_unlock_irqrestore(&dev_data_list_lock, flags);
  107. kfree(dev_data);
  108. }
  109. static struct iommu_dev_data *search_dev_data(u16 devid)
  110. {
  111. struct iommu_dev_data *dev_data;
  112. unsigned long flags;
  113. spin_lock_irqsave(&dev_data_list_lock, flags);
  114. list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
  115. if (dev_data->devid == devid)
  116. goto out_unlock;
  117. }
  118. dev_data = NULL;
  119. out_unlock:
  120. spin_unlock_irqrestore(&dev_data_list_lock, flags);
  121. return dev_data;
  122. }
  123. static struct iommu_dev_data *find_dev_data(u16 devid)
  124. {
  125. struct iommu_dev_data *dev_data;
  126. dev_data = search_dev_data(devid);
  127. if (dev_data == NULL)
  128. dev_data = alloc_dev_data(devid);
  129. return dev_data;
  130. }
  131. static inline u16 get_device_id(struct device *dev)
  132. {
  133. struct pci_dev *pdev = to_pci_dev(dev);
  134. return calc_devid(pdev->bus->number, pdev->devfn);
  135. }
  136. static struct iommu_dev_data *get_dev_data(struct device *dev)
  137. {
  138. return dev->archdata.iommu;
  139. }
  140. static bool pci_iommuv2_capable(struct pci_dev *pdev)
  141. {
  142. static const int caps[] = {
  143. PCI_EXT_CAP_ID_ATS,
  144. PCI_EXT_CAP_ID_PRI,
  145. PCI_EXT_CAP_ID_PASID,
  146. };
  147. int i, pos;
  148. for (i = 0; i < 3; ++i) {
  149. pos = pci_find_ext_capability(pdev, caps[i]);
  150. if (pos == 0)
  151. return false;
  152. }
  153. return true;
  154. }
  155. static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
  156. {
  157. struct iommu_dev_data *dev_data;
  158. dev_data = get_dev_data(&pdev->dev);
  159. return dev_data->errata & (1 << erratum) ? true : false;
  160. }
  161. /*
  162. * In this function the list of preallocated protection domains is traversed to
  163. * find the domain for a specific device
  164. */
  165. static struct dma_ops_domain *find_protection_domain(u16 devid)
  166. {
  167. struct dma_ops_domain *entry, *ret = NULL;
  168. unsigned long flags;
  169. u16 alias = amd_iommu_alias_table[devid];
  170. if (list_empty(&iommu_pd_list))
  171. return NULL;
  172. spin_lock_irqsave(&iommu_pd_list_lock, flags);
  173. list_for_each_entry(entry, &iommu_pd_list, list) {
  174. if (entry->target_dev == devid ||
  175. entry->target_dev == alias) {
  176. ret = entry;
  177. break;
  178. }
  179. }
  180. spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
  181. return ret;
  182. }
  183. /*
  184. * This function checks if the driver got a valid device from the caller to
  185. * avoid dereferencing invalid pointers.
  186. */
  187. static bool check_device(struct device *dev)
  188. {
  189. u16 devid;
  190. if (!dev || !dev->dma_mask)
  191. return false;
  192. /* No device or no PCI device */
  193. if (dev->bus != &pci_bus_type)
  194. return false;
  195. devid = get_device_id(dev);
  196. /* Out of our scope? */
  197. if (devid > amd_iommu_last_bdf)
  198. return false;
  199. if (amd_iommu_rlookup_table[devid] == NULL)
  200. return false;
  201. return true;
  202. }
  203. static int iommu_init_device(struct device *dev)
  204. {
  205. struct pci_dev *pdev = to_pci_dev(dev);
  206. struct iommu_dev_data *dev_data;
  207. u16 alias;
  208. if (dev->archdata.iommu)
  209. return 0;
  210. dev_data = find_dev_data(get_device_id(dev));
  211. if (!dev_data)
  212. return -ENOMEM;
  213. alias = amd_iommu_alias_table[dev_data->devid];
  214. if (alias != dev_data->devid) {
  215. struct iommu_dev_data *alias_data;
  216. alias_data = find_dev_data(alias);
  217. if (alias_data == NULL) {
  218. pr_err("AMD-Vi: Warning: Unhandled device %s\n",
  219. dev_name(dev));
  220. free_dev_data(dev_data);
  221. return -ENOTSUPP;
  222. }
  223. dev_data->alias_data = alias_data;
  224. }
  225. if (pci_iommuv2_capable(pdev)) {
  226. struct amd_iommu *iommu;
  227. iommu = amd_iommu_rlookup_table[dev_data->devid];
  228. dev_data->iommu_v2 = iommu->is_iommu_v2;
  229. }
  230. dev->archdata.iommu = dev_data;
  231. return 0;
  232. }
  233. static void iommu_ignore_device(struct device *dev)
  234. {
  235. u16 devid, alias;
  236. devid = get_device_id(dev);
  237. alias = amd_iommu_alias_table[devid];
  238. memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
  239. memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
  240. amd_iommu_rlookup_table[devid] = NULL;
  241. amd_iommu_rlookup_table[alias] = NULL;
  242. }
  243. static void iommu_uninit_device(struct device *dev)
  244. {
  245. /*
  246. * Nothing to do here - we keep dev_data around for unplugged devices
  247. * and reuse it when the device is re-plugged - not doing so would
  248. * introduce a ton of races.
  249. */
  250. }
  251. void __init amd_iommu_uninit_devices(void)
  252. {
  253. struct iommu_dev_data *dev_data, *n;
  254. struct pci_dev *pdev = NULL;
  255. for_each_pci_dev(pdev) {
  256. if (!check_device(&pdev->dev))
  257. continue;
  258. iommu_uninit_device(&pdev->dev);
  259. }
  260. /* Free all of our dev_data structures */
  261. list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
  262. free_dev_data(dev_data);
  263. }
  264. int __init amd_iommu_init_devices(void)
  265. {
  266. struct pci_dev *pdev = NULL;
  267. int ret = 0;
  268. for_each_pci_dev(pdev) {
  269. if (!check_device(&pdev->dev))
  270. continue;
  271. ret = iommu_init_device(&pdev->dev);
  272. if (ret == -ENOTSUPP)
  273. iommu_ignore_device(&pdev->dev);
  274. else if (ret)
  275. goto out_free;
  276. }
  277. return 0;
  278. out_free:
  279. amd_iommu_uninit_devices();
  280. return ret;
  281. }
  282. #ifdef CONFIG_AMD_IOMMU_STATS
  283. /*
  284. * Initialization code for statistics collection
  285. */
  286. DECLARE_STATS_COUNTER(compl_wait);
  287. DECLARE_STATS_COUNTER(cnt_map_single);
  288. DECLARE_STATS_COUNTER(cnt_unmap_single);
  289. DECLARE_STATS_COUNTER(cnt_map_sg);
  290. DECLARE_STATS_COUNTER(cnt_unmap_sg);
  291. DECLARE_STATS_COUNTER(cnt_alloc_coherent);
  292. DECLARE_STATS_COUNTER(cnt_free_coherent);
  293. DECLARE_STATS_COUNTER(cross_page);
  294. DECLARE_STATS_COUNTER(domain_flush_single);
  295. DECLARE_STATS_COUNTER(domain_flush_all);
  296. DECLARE_STATS_COUNTER(alloced_io_mem);
  297. DECLARE_STATS_COUNTER(total_map_requests);
  298. DECLARE_STATS_COUNTER(complete_ppr);
  299. DECLARE_STATS_COUNTER(invalidate_iotlb);
  300. DECLARE_STATS_COUNTER(invalidate_iotlb_all);
  301. DECLARE_STATS_COUNTER(pri_requests);
  302. static struct dentry *stats_dir;
  303. static struct dentry *de_fflush;
  304. static void amd_iommu_stats_add(struct __iommu_counter *cnt)
  305. {
  306. if (stats_dir == NULL)
  307. return;
  308. cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
  309. &cnt->value);
  310. }
  311. static void amd_iommu_stats_init(void)
  312. {
  313. stats_dir = debugfs_create_dir("amd-iommu", NULL);
  314. if (stats_dir == NULL)
  315. return;
  316. de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
  317. (u32 *)&amd_iommu_unmap_flush);
  318. amd_iommu_stats_add(&compl_wait);
  319. amd_iommu_stats_add(&cnt_map_single);
  320. amd_iommu_stats_add(&cnt_unmap_single);
  321. amd_iommu_stats_add(&cnt_map_sg);
  322. amd_iommu_stats_add(&cnt_unmap_sg);
  323. amd_iommu_stats_add(&cnt_alloc_coherent);
  324. amd_iommu_stats_add(&cnt_free_coherent);
  325. amd_iommu_stats_add(&cross_page);
  326. amd_iommu_stats_add(&domain_flush_single);
  327. amd_iommu_stats_add(&domain_flush_all);
  328. amd_iommu_stats_add(&alloced_io_mem);
  329. amd_iommu_stats_add(&total_map_requests);
  330. amd_iommu_stats_add(&complete_ppr);
  331. amd_iommu_stats_add(&invalidate_iotlb);
  332. amd_iommu_stats_add(&invalidate_iotlb_all);
  333. amd_iommu_stats_add(&pri_requests);
  334. }
  335. #endif
  336. /****************************************************************************
  337. *
  338. * Interrupt handling functions
  339. *
  340. ****************************************************************************/
  341. static void dump_dte_entry(u16 devid)
  342. {
  343. int i;
  344. for (i = 0; i < 4; ++i)
  345. pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
  346. amd_iommu_dev_table[devid].data[i]);
  347. }
  348. static void dump_command(unsigned long phys_addr)
  349. {
  350. struct iommu_cmd *cmd = phys_to_virt(phys_addr);
  351. int i;
  352. for (i = 0; i < 4; ++i)
  353. pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
  354. }
  355. static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
  356. {
  357. int type, devid, domid, flags;
  358. volatile u32 *event = __evt;
  359. int count = 0;
  360. u64 address;
  361. retry:
  362. type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
  363. devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
  364. domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
  365. flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
  366. address = (u64)(((u64)event[3]) << 32) | event[2];
  367. if (type == 0) {
  368. /* Did we hit the erratum? */
  369. if (++count == LOOP_TIMEOUT) {
  370. pr_err("AMD-Vi: No event written to event log\n");
  371. return;
  372. }
  373. udelay(1);
  374. goto retry;
  375. }
  376. printk(KERN_ERR "AMD-Vi: Event logged [");
  377. switch (type) {
  378. case EVENT_TYPE_ILL_DEV:
  379. printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
  380. "address=0x%016llx flags=0x%04x]\n",
  381. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  382. address, flags);
  383. dump_dte_entry(devid);
  384. break;
  385. case EVENT_TYPE_IO_FAULT:
  386. printk("IO_PAGE_FAULT device=%02x:%02x.%x "
  387. "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
  388. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  389. domid, address, flags);
  390. break;
  391. case EVENT_TYPE_DEV_TAB_ERR:
  392. printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
  393. "address=0x%016llx flags=0x%04x]\n",
  394. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  395. address, flags);
  396. break;
  397. case EVENT_TYPE_PAGE_TAB_ERR:
  398. printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
  399. "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
  400. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  401. domid, address, flags);
  402. break;
  403. case EVENT_TYPE_ILL_CMD:
  404. printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
  405. dump_command(address);
  406. break;
  407. case EVENT_TYPE_CMD_HARD_ERR:
  408. printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
  409. "flags=0x%04x]\n", address, flags);
  410. break;
  411. case EVENT_TYPE_IOTLB_INV_TO:
  412. printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
  413. "address=0x%016llx]\n",
  414. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  415. address);
  416. break;
  417. case EVENT_TYPE_INV_DEV_REQ:
  418. printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
  419. "address=0x%016llx flags=0x%04x]\n",
  420. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  421. address, flags);
  422. break;
  423. default:
  424. printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
  425. }
  426. memset(__evt, 0, 4 * sizeof(u32));
  427. }
  428. static void iommu_poll_events(struct amd_iommu *iommu)
  429. {
  430. u32 head, tail;
  431. unsigned long flags;
  432. spin_lock_irqsave(&iommu->lock, flags);
  433. head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
  434. tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
  435. while (head != tail) {
  436. iommu_print_event(iommu, iommu->evt_buf + head);
  437. head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
  438. }
  439. writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
  440. spin_unlock_irqrestore(&iommu->lock, flags);
  441. }
  442. static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
  443. {
  444. struct amd_iommu_fault fault;
  445. INC_STATS_COUNTER(pri_requests);
  446. if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
  447. pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
  448. return;
  449. }
  450. fault.address = raw[1];
  451. fault.pasid = PPR_PASID(raw[0]);
  452. fault.device_id = PPR_DEVID(raw[0]);
  453. fault.tag = PPR_TAG(raw[0]);
  454. fault.flags = PPR_FLAGS(raw[0]);
  455. atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
  456. }
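/*
 * Illustrative sketch: faults reported above reach their consumer (e.g.
 * the IOMMUv2 driver) through a notifier block registered on
 * ppr_notifier. The handler and variable names below are hypothetical
 * and only show the expected shape of such a consumer.
 */
static int example_ppr_notifier(struct notifier_block *nb,
				unsigned long e, void *data)
{
	struct amd_iommu_fault *fault = data;

	pr_debug("PPR fault: devid=%04x pasid=%x addr=0x%016llx\n",
		 fault->device_id, fault->pasid, fault->address);

	return NOTIFY_DONE;
}

static struct notifier_block example_ppr_nb = {
	.notifier_call = example_ppr_notifier,
};

/* Registration would look like:
 * atomic_notifier_chain_register(&ppr_notifier, &example_ppr_nb);
 */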
  457. static void iommu_poll_ppr_log(struct amd_iommu *iommu)
  458. {
  459. unsigned long flags;
  460. u32 head, tail;
  461. if (iommu->ppr_log == NULL)
  462. return;
  463. /* enable ppr interrupts again */
  464. writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
  465. spin_lock_irqsave(&iommu->lock, flags);
  466. head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
  467. tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
  468. while (head != tail) {
  469. volatile u64 *raw;
  470. u64 entry[2];
  471. int i;
  472. raw = (u64 *)(iommu->ppr_log + head);
  473. /*
  474. * Hardware bug: Interrupt may arrive before the entry is
  475. * written to memory. If this happens we need to wait for the
  476. * entry to arrive.
  477. */
  478. for (i = 0; i < LOOP_TIMEOUT; ++i) {
  479. if (PPR_REQ_TYPE(raw[0]) != 0)
  480. break;
  481. udelay(1);
  482. }
  483. /* Avoid memcpy function-call overhead */
  484. entry[0] = raw[0];
  485. entry[1] = raw[1];
  486. /*
  487. * To detect the hardware bug we need to clear the entry
  488. * back to zero.
  489. */
  490. raw[0] = raw[1] = 0UL;
  491. /* Update head pointer of hardware ring-buffer */
  492. head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
  493. writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
  494. /*
  495. * Release iommu->lock because ppr-handling might need to
  496. * re-acquire it
  497. */
  498. spin_unlock_irqrestore(&iommu->lock, flags);
  499. /* Handle PPR entry */
  500. iommu_handle_ppr_entry(iommu, entry);
  501. spin_lock_irqsave(&iommu->lock, flags);
  502. /* Refresh ring-buffer information */
  503. head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
  504. tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
  505. }
  506. spin_unlock_irqrestore(&iommu->lock, flags);
  507. }
  508. irqreturn_t amd_iommu_int_thread(int irq, void *data)
  509. {
  510. struct amd_iommu *iommu;
  511. for_each_iommu(iommu) {
  512. iommu_poll_events(iommu);
  513. iommu_poll_ppr_log(iommu);
  514. }
  515. return IRQ_HANDLED;
  516. }
  517. irqreturn_t amd_iommu_int_handler(int irq, void *data)
  518. {
  519. return IRQ_WAKE_THREAD;
  520. }
  521. /****************************************************************************
  522. *
  523. * IOMMU command queuing functions
  524. *
  525. ****************************************************************************/
  526. static int wait_on_sem(volatile u64 *sem)
  527. {
  528. int i = 0;
  529. while (*sem == 0 && i < LOOP_TIMEOUT) {
  530. udelay(1);
  531. i += 1;
  532. }
  533. if (i == LOOP_TIMEOUT) {
  534. pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
  535. return -EIO;
  536. }
  537. return 0;
  538. }
  539. static void copy_cmd_to_buffer(struct amd_iommu *iommu,
  540. struct iommu_cmd *cmd,
  541. u32 tail)
  542. {
  543. u8 *target;
  544. target = iommu->cmd_buf + tail;
  545. tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
  546. /* Copy command to buffer */
  547. memcpy(target, cmd, sizeof(*cmd));
  548. /* Tell the IOMMU about it */
  549. writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
  550. }
  551. static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
  552. {
  553. WARN_ON(address & 0x7ULL);
  554. memset(cmd, 0, sizeof(*cmd));
  555. cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
  556. cmd->data[1] = upper_32_bits(__pa(address));
  557. cmd->data[2] = 1;
  558. CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
  559. }
  560. static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
  561. {
  562. memset(cmd, 0, sizeof(*cmd));
  563. cmd->data[0] = devid;
  564. CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
  565. }
  566. static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
  567. size_t size, u16 domid, int pde)
  568. {
  569. u64 pages;
  570. int s;
  571. pages = iommu_num_pages(address, size, PAGE_SIZE);
  572. s = 0;
  573. if (pages > 1) {
  574. /*
  575. * If we have to flush more than one page, flush all
  576. * TLB entries for this domain
  577. */
  578. address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
  579. s = 1;
  580. }
  581. address &= PAGE_MASK;
  582. memset(cmd, 0, sizeof(*cmd));
  583. cmd->data[1] |= domid;
  584. cmd->data[2] = lower_32_bits(address);
  585. cmd->data[3] = upper_32_bits(address);
  586. CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
  587. if (s) /* size bit - we flush more than one 4kb page */
  588. cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
  589. if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
  590. cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
  591. }
  592. static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
  593. u64 address, size_t size)
  594. {
  595. u64 pages;
  596. int s;
  597. pages = iommu_num_pages(address, size, PAGE_SIZE);
  598. s = 0;
  599. if (pages > 1) {
  600. /*
  601. * If we have to flush more than one page, flush all
  602. * TLB entries for this domain
  603. */
  604. address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
  605. s = 1;
  606. }
  607. address &= PAGE_MASK;
  608. memset(cmd, 0, sizeof(*cmd));
  609. cmd->data[0] = devid;
  610. cmd->data[0] |= (qdep & 0xff) << 24;
  611. cmd->data[1] = devid;
  612. cmd->data[2] = lower_32_bits(address);
  613. cmd->data[3] = upper_32_bits(address);
  614. CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
  615. if (s)
  616. cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
  617. }
  618. static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
  619. u64 address, bool size)
  620. {
  621. memset(cmd, 0, sizeof(*cmd));
  622. address &= ~(0xfffULL);
  623. cmd->data[0] = pasid & PASID_MASK;
  624. cmd->data[1] = domid;
  625. cmd->data[2] = lower_32_bits(address);
  626. cmd->data[3] = upper_32_bits(address);
  627. cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
  628. cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
  629. if (size)
  630. cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
  631. CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
  632. }
  633. static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
  634. int qdep, u64 address, bool size)
  635. {
  636. memset(cmd, 0, sizeof(*cmd));
  637. address &= ~(0xfffULL);
  638. cmd->data[0] = devid;
  639. cmd->data[0] |= (pasid & 0xff) << 16;
  640. cmd->data[0] |= (qdep & 0xff) << 24;
  641. cmd->data[1] = devid;
  642. cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
  643. cmd->data[2] = lower_32_bits(address);
  644. cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
  645. cmd->data[3] = upper_32_bits(address);
  646. if (size)
  647. cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
  648. CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
  649. }
  650. static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
  651. int status, int tag, bool gn)
  652. {
  653. memset(cmd, 0, sizeof(*cmd));
  654. cmd->data[0] = devid;
  655. if (gn) {
  656. cmd->data[1] = pasid & PASID_MASK;
  657. cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
  658. }
  659. cmd->data[3] = tag & 0x1ff;
  660. cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
  661. CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
  662. }
  663. static void build_inv_all(struct iommu_cmd *cmd)
  664. {
  665. memset(cmd, 0, sizeof(*cmd));
  666. CMD_SET_TYPE(cmd, CMD_INV_ALL);
  667. }
  668. /*
  669. * Writes the command to the IOMMU's command buffer and informs the
  670. * hardware about the new command.
  671. */
  672. static int iommu_queue_command_sync(struct amd_iommu *iommu,
  673. struct iommu_cmd *cmd,
  674. bool sync)
  675. {
  676. u32 left, tail, head, next_tail;
  677. unsigned long flags;
  678. WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
  679. again:
  680. spin_lock_irqsave(&iommu->lock, flags);
  681. head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
  682. tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
  683. next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
  684. left = (head - next_tail) % iommu->cmd_buf_size;
  685. if (left <= 2) {
  686. struct iommu_cmd sync_cmd;
  687. volatile u64 sem = 0;
  688. int ret;
  689. build_completion_wait(&sync_cmd, (u64)&sem);
  690. copy_cmd_to_buffer(iommu, &sync_cmd, tail);
  691. spin_unlock_irqrestore(&iommu->lock, flags);
  692. if ((ret = wait_on_sem(&sem)) != 0)
  693. return ret;
  694. goto again;
  695. }
  696. copy_cmd_to_buffer(iommu, cmd, tail);
  697. /* We need to sync now to make sure all commands are processed */
  698. iommu->need_sync = sync;
  699. spin_unlock_irqrestore(&iommu->lock, flags);
  700. return 0;
  701. }
  702. static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
  703. {
  704. return iommu_queue_command_sync(iommu, cmd, true);
  705. }
  706. /*
  707. * This function queues a completion wait command into the command
  708. * buffer of an IOMMU
  709. */
  710. static int iommu_completion_wait(struct amd_iommu *iommu)
  711. {
  712. struct iommu_cmd cmd;
  713. volatile u64 sem = 0;
  714. int ret;
  715. if (!iommu->need_sync)
  716. return 0;
  717. build_completion_wait(&cmd, (u64)&sem);
  718. ret = iommu_queue_command_sync(iommu, &cmd, false);
  719. if (ret)
  720. return ret;
  721. return wait_on_sem(&sem);
  722. }
  723. static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
  724. {
  725. struct iommu_cmd cmd;
  726. build_inv_dte(&cmd, devid);
  727. return iommu_queue_command(iommu, &cmd);
  728. }
  729. static void iommu_flush_dte_all(struct amd_iommu *iommu)
  730. {
  731. u32 devid;
  732. for (devid = 0; devid <= 0xffff; ++devid)
  733. iommu_flush_dte(iommu, devid);
  734. iommu_completion_wait(iommu);
  735. }
  736. /*
  737. * This function uses heavy locking and may disable irqs for some time. But
  738. * this is no issue because it is only called during resume.
  739. */
  740. static void iommu_flush_tlb_all(struct amd_iommu *iommu)
  741. {
  742. u32 dom_id;
  743. for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
  744. struct iommu_cmd cmd;
  745. build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
  746. dom_id, 1);
  747. iommu_queue_command(iommu, &cmd);
  748. }
  749. iommu_completion_wait(iommu);
  750. }
  751. static void iommu_flush_all(struct amd_iommu *iommu)
  752. {
  753. struct iommu_cmd cmd;
  754. build_inv_all(&cmd);
  755. iommu_queue_command(iommu, &cmd);
  756. iommu_completion_wait(iommu);
  757. }
  758. void iommu_flush_all_caches(struct amd_iommu *iommu)
  759. {
  760. if (iommu_feature(iommu, FEATURE_IA)) {
  761. iommu_flush_all(iommu);
  762. } else {
  763. iommu_flush_dte_all(iommu);
  764. iommu_flush_tlb_all(iommu);
  765. }
  766. }
  767. /*
  768. * Command send function for flushing on-device TLB
  769. */
  770. static int device_flush_iotlb(struct iommu_dev_data *dev_data,
  771. u64 address, size_t size)
  772. {
  773. struct amd_iommu *iommu;
  774. struct iommu_cmd cmd;
  775. int qdep;
  776. qdep = dev_data->ats.qdep;
  777. iommu = amd_iommu_rlookup_table[dev_data->devid];
  778. build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
  779. return iommu_queue_command(iommu, &cmd);
  780. }
  781. /*
  782. * Command send function for invalidating a device table entry
  783. */
  784. static int device_flush_dte(struct iommu_dev_data *dev_data)
  785. {
  786. struct amd_iommu *iommu;
  787. int ret;
  788. iommu = amd_iommu_rlookup_table[dev_data->devid];
  789. ret = iommu_flush_dte(iommu, dev_data->devid);
  790. if (ret)
  791. return ret;
  792. if (dev_data->ats.enabled)
  793. ret = device_flush_iotlb(dev_data, 0, ~0UL);
  794. return ret;
  795. }
  796. /*
  797. * TLB invalidation function which is called from the mapping functions.
  798. * It invalidates a single PTE if the range to flush is within a single
  799. * page. Otherwise it flushes the whole TLB of the IOMMU.
  800. */
  801. static void __domain_flush_pages(struct protection_domain *domain,
  802. u64 address, size_t size, int pde)
  803. {
  804. struct iommu_dev_data *dev_data;
  805. struct iommu_cmd cmd;
  806. int ret = 0, i;
  807. build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
  808. for (i = 0; i < amd_iommus_present; ++i) {
  809. if (!domain->dev_iommu[i])
  810. continue;
  811. /*
  812. * Devices of this domain are behind this IOMMU
  813. * We need a TLB flush
  814. */
  815. ret |= iommu_queue_command(amd_iommus[i], &cmd);
  816. }
  817. list_for_each_entry(dev_data, &domain->dev_list, list) {
  818. if (!dev_data->ats.enabled)
  819. continue;
  820. ret |= device_flush_iotlb(dev_data, address, size);
  821. }
  822. WARN_ON(ret);
  823. }
  824. static void domain_flush_pages(struct protection_domain *domain,
  825. u64 address, size_t size)
  826. {
  827. __domain_flush_pages(domain, address, size, 0);
  828. }
  829. /* Flush the whole IO/TLB for a given protection domain */
  830. static void domain_flush_tlb(struct protection_domain *domain)
  831. {
  832. __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
  833. }
  834. /* Flush the whole IO/TLB for a given protection domain - including PDE */
  835. static void domain_flush_tlb_pde(struct protection_domain *domain)
  836. {
  837. __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
  838. }
  839. static void domain_flush_complete(struct protection_domain *domain)
  840. {
  841. int i;
  842. for (i = 0; i < amd_iommus_present; ++i) {
  843. if (!domain->dev_iommu[i])
  844. continue;
  845. /*
  846. * Devices of this domain are behind this IOMMU
  847. * We need to wait for completion of all commands.
  848. */
  849. iommu_completion_wait(amd_iommus[i]);
  850. }
  851. }
  852. /*
  853. * This function flushes the DTEs for all devices in the domain
  854. */
  855. static void domain_flush_devices(struct protection_domain *domain)
  856. {
  857. struct iommu_dev_data *dev_data;
  858. list_for_each_entry(dev_data, &domain->dev_list, list)
  859. device_flush_dte(dev_data);
  860. }
  861. /****************************************************************************
  862. *
  863. * The functions below are used to create the page table mappings for
  864. * unity mapped regions.
  865. *
  866. ****************************************************************************/
  867. /*
  868. * This function is used to add another level to an IO page table. Adding
  869. * another level increases the size of the address space by 9 bits to a size up
  870. * to 64 bits.
  871. */
  872. static bool increase_address_space(struct protection_domain *domain,
  873. gfp_t gfp)
  874. {
  875. u64 *pte;
  876. if (domain->mode == PAGE_MODE_6_LEVEL)
  877. /* address space already 64 bit large */
  878. return false;
  879. pte = (void *)get_zeroed_page(gfp);
  880. if (!pte)
  881. return false;
  882. *pte = PM_LEVEL_PDE(domain->mode,
  883. virt_to_phys(domain->pt_root));
  884. domain->pt_root = pte;
  885. domain->mode += 1;
  886. domain->updated = true;
  887. return true;
  888. }
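/*
 * Worked example of the growth described above (each page-table level
 * resolves 9 address bits on top of the 12-bit page offset):
 *
 *   3 levels: 3 * 9 + 12 = 39 bits  (512 GiB of IOVA space)
 *   4 levels: 4 * 9 + 12 = 48 bits  (256 TiB)
 *   5 levels: 5 * 9 + 12 = 57 bits
 *   6 levels: 6 * 9 + 12 = 66 bits, capped at the 64-bit limit
 */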
  889. static u64 *alloc_pte(struct protection_domain *domain,
  890. unsigned long address,
  891. unsigned long page_size,
  892. u64 **pte_page,
  893. gfp_t gfp)
  894. {
  895. int level, end_lvl;
  896. u64 *pte, *page;
  897. BUG_ON(!is_power_of_2(page_size));
  898. while (address > PM_LEVEL_SIZE(domain->mode))
  899. increase_address_space(domain, gfp);
  900. level = domain->mode - 1;
  901. pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
  902. address = PAGE_SIZE_ALIGN(address, page_size);
  903. end_lvl = PAGE_SIZE_LEVEL(page_size);
  904. while (level > end_lvl) {
  905. if (!IOMMU_PTE_PRESENT(*pte)) {
  906. page = (u64 *)get_zeroed_page(gfp);
  907. if (!page)
  908. return NULL;
  909. *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
  910. }
  911. /* No level skipping support yet */
  912. if (PM_PTE_LEVEL(*pte) != level)
  913. return NULL;
  914. level -= 1;
  915. pte = IOMMU_PTE_PAGE(*pte);
  916. if (pte_page && level == end_lvl)
  917. *pte_page = pte;
  918. pte = &pte[PM_LEVEL_INDEX(level, address)];
  919. }
  920. return pte;
  921. }
  922. /*
  923. * This function checks if there is a PTE for a given dma address. If
  924. * there is one, it returns the pointer to it.
  925. */
  926. static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
  927. {
  928. int level;
  929. u64 *pte;
  930. if (address > PM_LEVEL_SIZE(domain->mode))
  931. return NULL;
  932. level = domain->mode - 1;
  933. pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
  934. while (level > 0) {
  935. /* Not Present */
  936. if (!IOMMU_PTE_PRESENT(*pte))
  937. return NULL;
  938. /* Large PTE */
  939. if (PM_PTE_LEVEL(*pte) == 0x07) {
  940. unsigned long pte_mask, __pte;
  941. /*
  942. * If we have a series of large PTEs, make
  943. * sure to return a pointer to the first one.
  944. */
  945. pte_mask = PTE_PAGE_SIZE(*pte);
  946. pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
  947. __pte = ((unsigned long)pte) & pte_mask;
  948. return (u64 *)__pte;
  949. }
  950. /* No level skipping support yet */
  951. if (PM_PTE_LEVEL(*pte) != level)
  952. return NULL;
  953. level -= 1;
  954. /* Walk to the next level */
  955. pte = IOMMU_PTE_PAGE(*pte);
  956. pte = &pte[PM_LEVEL_INDEX(level, address)];
  957. }
  958. return pte;
  959. }
  960. /*
  961. * Generic mapping functions. It maps a physical address into a DMA
  962. * address space. It allocates the page table pages if necessary.
  963. * In the future it can be extended to a generic mapping function
  964. * supporting all features of AMD IOMMU page tables like level skipping
  965. * and full 64 bit address spaces.
  966. */
  967. static int iommu_map_page(struct protection_domain *dom,
  968. unsigned long bus_addr,
  969. unsigned long phys_addr,
  970. int prot,
  971. unsigned long page_size)
  972. {
  973. u64 __pte, *pte;
  974. int i, count;
  975. if (!(prot & IOMMU_PROT_MASK))
  976. return -EINVAL;
  977. bus_addr = PAGE_ALIGN(bus_addr);
  978. phys_addr = PAGE_ALIGN(phys_addr);
  979. count = PAGE_SIZE_PTE_COUNT(page_size);
  980. pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
  981. for (i = 0; i < count; ++i)
  982. if (IOMMU_PTE_PRESENT(pte[i]))
  983. return -EBUSY;
  984. if (page_size > PAGE_SIZE) {
  985. __pte = PAGE_SIZE_PTE(phys_addr, page_size);
  986. __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
  987. } else
  988. __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
  989. if (prot & IOMMU_PROT_IR)
  990. __pte |= IOMMU_PTE_IR;
  991. if (prot & IOMMU_PROT_IW)
  992. __pte |= IOMMU_PTE_IW;
  993. for (i = 0; i < count; ++i)
  994. pte[i] = __pte;
  995. update_domain(dom);
  996. return 0;
  997. }
  998. static unsigned long iommu_unmap_page(struct protection_domain *dom,
  999. unsigned long bus_addr,
  1000. unsigned long page_size)
  1001. {
  1002. unsigned long long unmap_size, unmapped;
  1003. u64 *pte;
  1004. BUG_ON(!is_power_of_2(page_size));
  1005. unmapped = 0;
  1006. while (unmapped < page_size) {
  1007. pte = fetch_pte(dom, bus_addr);
  1008. if (!pte) {
  1009. /*
  1010. * No PTE for this address
  1011. * move forward in 4kb steps
  1012. */
  1013. unmap_size = PAGE_SIZE;
  1014. } else if (PM_PTE_LEVEL(*pte) == 0) {
  1015. /* 4kb PTE found for this address */
  1016. unmap_size = PAGE_SIZE;
  1017. *pte = 0ULL;
  1018. } else {
  1019. int count, i;
  1020. /* Large PTE found which maps this address */
  1021. unmap_size = PTE_PAGE_SIZE(*pte);
  1022. count = PAGE_SIZE_PTE_COUNT(unmap_size);
  1023. for (i = 0; i < count; i++)
  1024. pte[i] = 0ULL;
  1025. }
  1026. bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
  1027. unmapped += unmap_size;
  1028. }
  1029. BUG_ON(!is_power_of_2(unmapped));
  1030. return unmapped;
  1031. }
  1032. /*
  1033. * This function checks if a specific unity mapping entry is needed for
  1034. * this specific IOMMU.
  1035. */
  1036. static int iommu_for_unity_map(struct amd_iommu *iommu,
  1037. struct unity_map_entry *entry)
  1038. {
  1039. u16 bdf, i;
  1040. for (i = entry->devid_start; i <= entry->devid_end; ++i) {
  1041. bdf = amd_iommu_alias_table[i];
  1042. if (amd_iommu_rlookup_table[bdf] == iommu)
  1043. return 1;
  1044. }
  1045. return 0;
  1046. }
  1047. /*
  1048. * This function actually applies the mapping to the page table of the
  1049. * dma_ops domain.
  1050. */
  1051. static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
  1052. struct unity_map_entry *e)
  1053. {
  1054. u64 addr;
  1055. int ret;
  1056. for (addr = e->address_start; addr < e->address_end;
  1057. addr += PAGE_SIZE) {
  1058. ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
  1059. PAGE_SIZE);
  1060. if (ret)
  1061. return ret;
  1062. /*
  1063. * if unity mapping is in aperture range mark the page
  1064. * as allocated in the aperture
  1065. */
  1066. if (addr < dma_dom->aperture_size)
  1067. __set_bit(addr >> PAGE_SHIFT,
  1068. dma_dom->aperture[0]->bitmap);
  1069. }
  1070. return 0;
  1071. }
  1072. /*
  1073. * Init the unity mappings for a specific IOMMU in the system
  1074. *
  1075. * Basically iterates over all unity mapping entries and applies them to
  1076. * the default DMA domain of that IOMMU if necessary.
  1077. */
  1078. static int iommu_init_unity_mappings(struct amd_iommu *iommu)
  1079. {
  1080. struct unity_map_entry *entry;
  1081. int ret;
  1082. list_for_each_entry(entry, &amd_iommu_unity_map, list) {
  1083. if (!iommu_for_unity_map(iommu, entry))
  1084. continue;
  1085. ret = dma_ops_unity_map(iommu->default_dom, entry);
  1086. if (ret)
  1087. return ret;
  1088. }
  1089. return 0;
  1090. }
  1091. /*
  1092. * Inits the unity mappings required for a specific device
  1093. */
  1094. static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  1095. u16 devid)
  1096. {
  1097. struct unity_map_entry *e;
  1098. int ret;
  1099. list_for_each_entry(e, &amd_iommu_unity_map, list) {
  1100. if (!(devid >= e->devid_start && devid <= e->devid_end))
  1101. continue;
  1102. ret = dma_ops_unity_map(dma_dom, e);
  1103. if (ret)
  1104. return ret;
  1105. }
  1106. return 0;
  1107. }
  1108. /****************************************************************************
  1109. *
  1110. * The next functions belong to the address allocator for the dma_ops
  1111. * interface functions. They work like the allocators in the other IOMMU
  1112. * drivers. It's basically a bitmap which marks the allocated pages in
  1113. * the aperture. Maybe it could be enhanced in the future to a more
  1114. * efficient allocator.
  1115. *
  1116. ****************************************************************************/
  1117. /*
  1118. * The address allocator core functions.
  1119. *
  1120. * called with domain->lock held
  1121. */
  1122. /*
  1123. * Used to reserve address ranges in the aperture (e.g. for exclusion
  1124. * ranges).
  1125. */
  1126. static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
  1127. unsigned long start_page,
  1128. unsigned int pages)
  1129. {
  1130. unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
  1131. if (start_page + pages > last_page)
  1132. pages = last_page - start_page;
  1133. for (i = start_page; i < start_page + pages; ++i) {
  1134. int index = i / APERTURE_RANGE_PAGES;
  1135. int page = i % APERTURE_RANGE_PAGES;
  1136. __set_bit(page, dom->aperture[index]->bitmap);
  1137. }
  1138. }
  1139. /*
  1140. * This function is used to add a new aperture range to an existing
  1141. * aperture in case of dma_ops domain allocation or address allocation
  1142. * failure.
  1143. */
  1144. static int alloc_new_range(struct dma_ops_domain *dma_dom,
  1145. bool populate, gfp_t gfp)
  1146. {
  1147. int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
  1148. struct amd_iommu *iommu;
  1149. unsigned long i, old_size;
  1150. #ifdef CONFIG_IOMMU_STRESS
  1151. populate = false;
  1152. #endif
  1153. if (index >= APERTURE_MAX_RANGES)
  1154. return -ENOMEM;
  1155. dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
  1156. if (!dma_dom->aperture[index])
  1157. return -ENOMEM;
  1158. dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
  1159. if (!dma_dom->aperture[index]->bitmap)
  1160. goto out_free;
  1161. dma_dom->aperture[index]->offset = dma_dom->aperture_size;
  1162. if (populate) {
  1163. unsigned long address = dma_dom->aperture_size;
  1164. int i, num_ptes = APERTURE_RANGE_PAGES / 512;
  1165. u64 *pte, *pte_page;
  1166. for (i = 0; i < num_ptes; ++i) {
  1167. pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
  1168. &pte_page, gfp);
  1169. if (!pte)
  1170. goto out_free;
  1171. dma_dom->aperture[index]->pte_pages[i] = pte_page;
  1172. address += APERTURE_RANGE_SIZE / 64;
  1173. }
  1174. }
  1175. old_size = dma_dom->aperture_size;
  1176. dma_dom->aperture_size += APERTURE_RANGE_SIZE;
  1177. /* Reserve address range used for MSI messages */
  1178. if (old_size < MSI_ADDR_BASE_LO &&
  1179. dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
  1180. unsigned long spage;
  1181. int pages;
  1182. pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
  1183. spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
  1184. dma_ops_reserve_addresses(dma_dom, spage, pages);
  1185. }
  1186. /* Initialize the exclusion range if necessary */
  1187. for_each_iommu(iommu) {
  1188. if (iommu->exclusion_start &&
  1189. iommu->exclusion_start >= dma_dom->aperture[index]->offset
  1190. && iommu->exclusion_start < dma_dom->aperture_size) {
  1191. unsigned long startpage;
  1192. int pages = iommu_num_pages(iommu->exclusion_start,
  1193. iommu->exclusion_length,
  1194. PAGE_SIZE);
  1195. startpage = iommu->exclusion_start >> PAGE_SHIFT;
  1196. dma_ops_reserve_addresses(dma_dom, startpage, pages);
  1197. }
  1198. }
  1199. /*
  1200. * Check for areas already mapped as present in the new aperture
  1201. * range and mark those pages as reserved in the allocator. Such
  1202. * mappings may already exist as a result of requested unity
  1203. * mappings for devices.
  1204. */
  1205. for (i = dma_dom->aperture[index]->offset;
  1206. i < dma_dom->aperture_size;
  1207. i += PAGE_SIZE) {
  1208. u64 *pte = fetch_pte(&dma_dom->domain, i);
  1209. if (!pte || !IOMMU_PTE_PRESENT(*pte))
  1210. continue;
  1211. dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
  1212. }
  1213. update_domain(&dma_dom->domain);
  1214. return 0;
  1215. out_free:
  1216. update_domain(&dma_dom->domain);
  1217. free_page((unsigned long)dma_dom->aperture[index]->bitmap);
  1218. kfree(dma_dom->aperture[index]);
  1219. dma_dom->aperture[index] = NULL;
  1220. return -ENOMEM;
  1221. }
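/*
 * Illustrative note (not part of the driver): with the same 128 MB / 4 KiB
 * assumption as the note above, the populate loop in alloc_new_range()
 * allocates APERTURE_RANGE_PAGES / 512 = 64 PTE pages. Each PTE page holds
 * 512 last-level PTEs and therefore covers 512 * 4 KiB = 2 MB, which is why
 * the loop advances 'address' by APERTURE_RANGE_SIZE / 64 = 2 MB per
 * iteration.
 */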
  1222. static unsigned long dma_ops_area_alloc(struct device *dev,
  1223. struct dma_ops_domain *dom,
  1224. unsigned int pages,
  1225. unsigned long align_mask,
  1226. u64 dma_mask,
  1227. unsigned long start)
  1228. {
  1229. unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
  1230. int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
  1231. int i = start >> APERTURE_RANGE_SHIFT;
  1232. unsigned long boundary_size;
  1233. unsigned long address = -1;
  1234. unsigned long limit;
  1235. next_bit >>= PAGE_SHIFT;
  1236. boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
  1237. PAGE_SIZE) >> PAGE_SHIFT;
  1238. for (;i < max_index; ++i) {
  1239. unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
  1240. if (dom->aperture[i]->offset >= dma_mask)
  1241. break;
  1242. limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
  1243. dma_mask >> PAGE_SHIFT);
  1244. address = iommu_area_alloc(dom->aperture[i]->bitmap,
  1245. limit, next_bit, pages, 0,
  1246. boundary_size, align_mask);
  1247. if (address != -1) {
  1248. address = dom->aperture[i]->offset +
  1249. (address << PAGE_SHIFT);
  1250. dom->next_address = address + (pages << PAGE_SHIFT);
  1251. break;
  1252. }
  1253. next_bit = 0;
  1254. }
  1255. return address;
  1256. }
  1257. static unsigned long dma_ops_alloc_addresses(struct device *dev,
  1258. struct dma_ops_domain *dom,
  1259. unsigned int pages,
  1260. unsigned long align_mask,
  1261. u64 dma_mask)
  1262. {
  1263. unsigned long address;
  1264. #ifdef CONFIG_IOMMU_STRESS
  1265. dom->next_address = 0;
  1266. dom->need_flush = true;
  1267. #endif
  1268. address = dma_ops_area_alloc(dev, dom, pages, align_mask,
  1269. dma_mask, dom->next_address);
  1270. if (address == -1) {
  1271. dom->next_address = 0;
  1272. address = dma_ops_area_alloc(dev, dom, pages, align_mask,
  1273. dma_mask, 0);
  1274. dom->need_flush = true;
  1275. }
  1276. if (unlikely(address == -1))
  1277. address = DMA_ERROR_CODE;
  1278. WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
  1279. return address;
  1280. }
  1281. /*
  1282. * The address free function.
  1283. *
  1284. * called with domain->lock held
  1285. */
  1286. static void dma_ops_free_addresses(struct dma_ops_domain *dom,
  1287. unsigned long address,
  1288. unsigned int pages)
  1289. {
  1290. unsigned i = address >> APERTURE_RANGE_SHIFT;
  1291. struct aperture_range *range = dom->aperture[i];
  1292. BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
  1293. #ifdef CONFIG_IOMMU_STRESS
  1294. if (i < 4)
  1295. return;
  1296. #endif
  1297. if (address >= dom->next_address)
  1298. dom->need_flush = true;
  1299. address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
  1300. bitmap_clear(range->bitmap, address, pages);
  1301. }
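/*
 * Illustrative note (not part of the driver): dma_ops_free_addresses()
 * reverses the address composition done in dma_ops_area_alloc(). Assuming
 * 128 MB aperture ranges and 4 KiB pages, freeing DMA address 0x8042000
 * clears bits starting at
 *
 *   i   = 0x8042000 >> APERTURE_RANGE_SHIFT                = 1  (second range)
 *   bit = (0x8042000 % APERTURE_RANGE_SIZE) >> PAGE_SHIFT  = 66
 */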
  1302. /****************************************************************************
  1303. *
  1304. * The next functions belong to the domain allocation. A domain is
  1305. * allocated for every IOMMU as the default domain. If device isolation
1306. is enabled, every device gets its own domain. The most important thing
  1307. * about domains is the page table mapping the DMA address space they
  1308. * contain.
  1309. *
  1310. ****************************************************************************/
  1311. /*
  1312. * This function adds a protection domain to the global protection domain list
  1313. */
  1314. static void add_domain_to_list(struct protection_domain *domain)
  1315. {
  1316. unsigned long flags;
  1317. spin_lock_irqsave(&amd_iommu_pd_lock, flags);
  1318. list_add(&domain->list, &amd_iommu_pd_list);
  1319. spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
  1320. }
  1321. /*
1322. * This function removes a protection domain from the global
  1323. * protection domain list
  1324. */
  1325. static void del_domain_from_list(struct protection_domain *domain)
  1326. {
  1327. unsigned long flags;
  1328. spin_lock_irqsave(&amd_iommu_pd_lock, flags);
  1329. list_del(&domain->list);
  1330. spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
  1331. }
  1332. static u16 domain_id_alloc(void)
  1333. {
  1334. unsigned long flags;
  1335. int id;
  1336. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  1337. id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
  1338. BUG_ON(id == 0);
  1339. if (id > 0 && id < MAX_DOMAIN_ID)
  1340. __set_bit(id, amd_iommu_pd_alloc_bitmap);
  1341. else
  1342. id = 0;
  1343. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  1344. return id;
  1345. }
  1346. static void domain_id_free(int id)
  1347. {
  1348. unsigned long flags;
  1349. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  1350. if (id > 0 && id < MAX_DOMAIN_ID)
  1351. __clear_bit(id, amd_iommu_pd_alloc_bitmap);
  1352. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  1353. }
  1354. static void free_pagetable(struct protection_domain *domain)
  1355. {
  1356. int i, j;
  1357. u64 *p1, *p2, *p3;
  1358. p1 = domain->pt_root;
  1359. if (!p1)
  1360. return;
  1361. for (i = 0; i < 512; ++i) {
  1362. if (!IOMMU_PTE_PRESENT(p1[i]))
  1363. continue;
  1364. p2 = IOMMU_PTE_PAGE(p1[i]);
  1365. for (j = 0; j < 512; ++j) {
  1366. if (!IOMMU_PTE_PRESENT(p2[j]))
  1367. continue;
  1368. p3 = IOMMU_PTE_PAGE(p2[j]);
  1369. free_page((unsigned long)p3);
  1370. }
  1371. free_page((unsigned long)p2);
  1372. }
  1373. free_page((unsigned long)p1);
  1374. domain->pt_root = NULL;
  1375. }
  1376. static void free_gcr3_tbl_level1(u64 *tbl)
  1377. {
  1378. u64 *ptr;
  1379. int i;
  1380. for (i = 0; i < 512; ++i) {
  1381. if (!(tbl[i] & GCR3_VALID))
  1382. continue;
  1383. ptr = __va(tbl[i] & PAGE_MASK);
  1384. free_page((unsigned long)ptr);
  1385. }
  1386. }
  1387. static void free_gcr3_tbl_level2(u64 *tbl)
  1388. {
  1389. u64 *ptr;
  1390. int i;
  1391. for (i = 0; i < 512; ++i) {
  1392. if (!(tbl[i] & GCR3_VALID))
  1393. continue;
  1394. ptr = __va(tbl[i] & PAGE_MASK);
  1395. free_gcr3_tbl_level1(ptr);
  1396. }
  1397. }
  1398. static void free_gcr3_table(struct protection_domain *domain)
  1399. {
  1400. if (domain->glx == 2)
  1401. free_gcr3_tbl_level2(domain->gcr3_tbl);
  1402. else if (domain->glx == 1)
  1403. free_gcr3_tbl_level1(domain->gcr3_tbl);
  1404. else if (domain->glx != 0)
  1405. BUG();
  1406. free_page((unsigned long)domain->gcr3_tbl);
  1407. }
  1408. /*
  1409. * Free a domain, only used if something went wrong in the
  1410. * allocation path and we need to free an already allocated page table
  1411. */
  1412. static void dma_ops_domain_free(struct dma_ops_domain *dom)
  1413. {
  1414. int i;
  1415. if (!dom)
  1416. return;
  1417. del_domain_from_list(&dom->domain);
  1418. free_pagetable(&dom->domain);
  1419. for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
  1420. if (!dom->aperture[i])
  1421. continue;
  1422. free_page((unsigned long)dom->aperture[i]->bitmap);
  1423. kfree(dom->aperture[i]);
  1424. }
  1425. kfree(dom);
  1426. }
  1427. /*
  1428. * Allocates a new protection domain usable for the dma_ops functions.
  1429. * It also initializes the page table and the address allocator data
  1430. * structures required for the dma_ops interface
  1431. */
  1432. static struct dma_ops_domain *dma_ops_domain_alloc(void)
  1433. {
  1434. struct dma_ops_domain *dma_dom;
  1435. dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
  1436. if (!dma_dom)
  1437. return NULL;
  1438. spin_lock_init(&dma_dom->domain.lock);
  1439. dma_dom->domain.id = domain_id_alloc();
  1440. if (dma_dom->domain.id == 0)
  1441. goto free_dma_dom;
  1442. INIT_LIST_HEAD(&dma_dom->domain.dev_list);
  1443. dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
  1444. dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
  1445. dma_dom->domain.flags = PD_DMA_OPS_MASK;
  1446. dma_dom->domain.priv = dma_dom;
  1447. if (!dma_dom->domain.pt_root)
  1448. goto free_dma_dom;
  1449. dma_dom->need_flush = false;
  1450. dma_dom->target_dev = 0xffff;
  1451. add_domain_to_list(&dma_dom->domain);
  1452. if (alloc_new_range(dma_dom, true, GFP_KERNEL))
  1453. goto free_dma_dom;
  1454. /*
1455. * Mark the first page as allocated so we never return 0 as
1456. * a valid dma-address and can therefore use 0 as the error value
  1457. */
  1458. dma_dom->aperture[0]->bitmap[0] = 1;
  1459. dma_dom->next_address = 0;
  1460. return dma_dom;
  1461. free_dma_dom:
  1462. dma_ops_domain_free(dma_dom);
  1463. return NULL;
  1464. }
  1465. /*
  1466. * little helper function to check whether a given protection domain is a
  1467. * dma_ops domain
  1468. */
  1469. static bool dma_ops_domain(struct protection_domain *domain)
  1470. {
  1471. return domain->flags & PD_DMA_OPS_MASK;
  1472. }
  1473. static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
  1474. {
  1475. u64 pte_root = 0;
  1476. u64 flags = 0;
  1477. if (domain->mode != PAGE_MODE_NONE)
  1478. pte_root = virt_to_phys(domain->pt_root);
  1479. pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
  1480. << DEV_ENTRY_MODE_SHIFT;
  1481. pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
  1482. flags = amd_iommu_dev_table[devid].data[1];
  1483. if (ats)
  1484. flags |= DTE_FLAG_IOTLB;
  1485. if (domain->flags & PD_IOMMUV2_MASK) {
  1486. u64 gcr3 = __pa(domain->gcr3_tbl);
  1487. u64 glx = domain->glx;
  1488. u64 tmp;
  1489. pte_root |= DTE_FLAG_GV;
  1490. pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
  1491. /* First mask out possible old values for GCR3 table */
  1492. tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
  1493. flags &= ~tmp;
  1494. tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
  1495. flags &= ~tmp;
  1496. /* Encode GCR3 table into DTE */
  1497. tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
  1498. pte_root |= tmp;
  1499. tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
  1500. flags |= tmp;
  1501. tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
  1502. flags |= tmp;
  1503. }
  1504. flags &= ~(0xffffUL);
  1505. flags |= domain->id;
  1506. amd_iommu_dev_table[devid].data[1] = flags;
  1507. amd_iommu_dev_table[devid].data[0] = pte_root;
  1508. }
  1509. static void clear_dte_entry(u16 devid)
  1510. {
  1511. /* remove entry from the device table seen by the hardware */
  1512. amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
  1513. amd_iommu_dev_table[devid].data[1] = 0;
  1514. amd_iommu_apply_erratum_63(devid);
  1515. }
  1516. static void do_attach(struct iommu_dev_data *dev_data,
  1517. struct protection_domain *domain)
  1518. {
  1519. struct amd_iommu *iommu;
  1520. bool ats;
  1521. iommu = amd_iommu_rlookup_table[dev_data->devid];
  1522. ats = dev_data->ats.enabled;
  1523. /* Update data structures */
  1524. dev_data->domain = domain;
  1525. list_add(&dev_data->list, &domain->dev_list);
  1526. set_dte_entry(dev_data->devid, domain, ats);
  1527. /* Do reference counting */
  1528. domain->dev_iommu[iommu->index] += 1;
  1529. domain->dev_cnt += 1;
  1530. /* Flush the DTE entry */
  1531. device_flush_dte(dev_data);
  1532. }
  1533. static void do_detach(struct iommu_dev_data *dev_data)
  1534. {
  1535. struct amd_iommu *iommu;
  1536. iommu = amd_iommu_rlookup_table[dev_data->devid];
  1537. /* decrease reference counters */
  1538. dev_data->domain->dev_iommu[iommu->index] -= 1;
  1539. dev_data->domain->dev_cnt -= 1;
  1540. /* Update data structures */
  1541. dev_data->domain = NULL;
  1542. list_del(&dev_data->list);
  1543. clear_dte_entry(dev_data->devid);
  1544. /* Flush the DTE entry */
  1545. device_flush_dte(dev_data);
  1546. }
  1547. /*
1548. * If a device is not yet associated with a domain, this function
1549. * associates it with the domain and makes that assignment visible to the hardware
  1550. */
  1551. static int __attach_device(struct iommu_dev_data *dev_data,
  1552. struct protection_domain *domain)
  1553. {
  1554. int ret;
  1555. /* lock domain */
  1556. spin_lock(&domain->lock);
  1557. if (dev_data->alias_data != NULL) {
  1558. struct iommu_dev_data *alias_data = dev_data->alias_data;
  1559. /* Some sanity checks */
  1560. ret = -EBUSY;
  1561. if (alias_data->domain != NULL &&
  1562. alias_data->domain != domain)
  1563. goto out_unlock;
  1564. if (dev_data->domain != NULL &&
  1565. dev_data->domain != domain)
  1566. goto out_unlock;
  1567. /* Do real assignment */
  1568. if (alias_data->domain == NULL)
  1569. do_attach(alias_data, domain);
  1570. atomic_inc(&alias_data->bind);
  1571. }
  1572. if (dev_data->domain == NULL)
  1573. do_attach(dev_data, domain);
  1574. atomic_inc(&dev_data->bind);
  1575. ret = 0;
  1576. out_unlock:
  1577. /* ready */
  1578. spin_unlock(&domain->lock);
  1579. return ret;
  1580. }
  1581. static void pdev_iommuv2_disable(struct pci_dev *pdev)
  1582. {
  1583. pci_disable_ats(pdev);
  1584. pci_disable_pri(pdev);
  1585. pci_disable_pasid(pdev);
  1586. }
  1587. /* FIXME: Change generic reset-function to do the same */
  1588. static int pri_reset_while_enabled(struct pci_dev *pdev)
  1589. {
  1590. u16 control;
  1591. int pos;
  1592. pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
  1593. if (!pos)
  1594. return -EINVAL;
  1595. pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
  1596. control |= PCI_PRI_CTRL_RESET;
  1597. pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
  1598. return 0;
  1599. }
  1600. static int pdev_iommuv2_enable(struct pci_dev *pdev)
  1601. {
  1602. bool reset_enable;
  1603. int reqs, ret;
  1604. /* FIXME: Hardcode number of outstanding requests for now */
  1605. reqs = 32;
  1606. if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
  1607. reqs = 1;
  1608. reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
  1609. /* Only allow access to user-accessible pages */
  1610. ret = pci_enable_pasid(pdev, 0);
  1611. if (ret)
  1612. goto out_err;
  1613. /* First reset the PRI state of the device */
  1614. ret = pci_reset_pri(pdev);
  1615. if (ret)
  1616. goto out_err;
  1617. /* Enable PRI */
  1618. ret = pci_enable_pri(pdev, reqs);
  1619. if (ret)
  1620. goto out_err;
  1621. if (reset_enable) {
  1622. ret = pri_reset_while_enabled(pdev);
  1623. if (ret)
  1624. goto out_err;
  1625. }
  1626. ret = pci_enable_ats(pdev, PAGE_SHIFT);
  1627. if (ret)
  1628. goto out_err;
  1629. return 0;
  1630. out_err:
  1631. pci_disable_pri(pdev);
  1632. pci_disable_pasid(pdev);
  1633. return ret;
  1634. }
  1635. /* FIXME: Move this to PCI code */
  1636. #define PCI_PRI_TLP_OFF (1 << 15)
  1637. bool pci_pri_tlp_required(struct pci_dev *pdev)
  1638. {
  1639. u16 status;
  1640. int pos;
  1641. pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
  1642. if (!pos)
  1643. return false;
  1644. pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
  1645. return (status & PCI_PRI_TLP_OFF) ? true : false;
  1646. }
  1647. /*
1648. * If a device is not yet associated with a domain, this function
1649. * associates it with the domain and makes that assignment visible to the hardware
  1650. */
  1651. static int attach_device(struct device *dev,
  1652. struct protection_domain *domain)
  1653. {
  1654. struct pci_dev *pdev = to_pci_dev(dev);
  1655. struct iommu_dev_data *dev_data;
  1656. unsigned long flags;
  1657. int ret;
  1658. dev_data = get_dev_data(dev);
  1659. if (domain->flags & PD_IOMMUV2_MASK) {
  1660. if (!dev_data->iommu_v2 || !dev_data->passthrough)
  1661. return -EINVAL;
  1662. if (pdev_iommuv2_enable(pdev) != 0)
  1663. return -EINVAL;
  1664. dev_data->ats.enabled = true;
  1665. dev_data->ats.qdep = pci_ats_queue_depth(pdev);
  1666. dev_data->pri_tlp = pci_pri_tlp_required(pdev);
  1667. } else if (amd_iommu_iotlb_sup &&
  1668. pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
  1669. dev_data->ats.enabled = true;
  1670. dev_data->ats.qdep = pci_ats_queue_depth(pdev);
  1671. }
  1672. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  1673. ret = __attach_device(dev_data, domain);
  1674. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  1675. /*
  1676. * We might boot into a crash-kernel here. The crashed kernel
  1677. * left the caches in the IOMMU dirty. So we have to flush
  1678. * here to evict all dirty stuff.
  1679. */
  1680. domain_flush_tlb_pde(domain);
  1681. return ret;
  1682. }
  1683. /*
  1684. * Removes a device from a protection domain (unlocked)
  1685. */
  1686. static void __detach_device(struct iommu_dev_data *dev_data)
  1687. {
  1688. struct protection_domain *domain;
  1689. unsigned long flags;
  1690. BUG_ON(!dev_data->domain);
  1691. domain = dev_data->domain;
  1692. spin_lock_irqsave(&domain->lock, flags);
  1693. if (dev_data->alias_data != NULL) {
  1694. struct iommu_dev_data *alias_data = dev_data->alias_data;
  1695. if (atomic_dec_and_test(&alias_data->bind))
  1696. do_detach(alias_data);
  1697. }
  1698. if (atomic_dec_and_test(&dev_data->bind))
  1699. do_detach(dev_data);
  1700. spin_unlock_irqrestore(&domain->lock, flags);
  1701. /*
  1702. * If we run in passthrough mode the device must be assigned to the
  1703. * passthrough domain if it is detached from any other domain.
  1704. * Make sure we can deassign from the pt_domain itself.
  1705. */
  1706. if (dev_data->passthrough &&
  1707. (dev_data->domain == NULL && domain != pt_domain))
  1708. __attach_device(dev_data, pt_domain);
  1709. }
  1710. /*
  1711. * Removes a device from a protection domain (with devtable_lock held)
  1712. */
  1713. static void detach_device(struct device *dev)
  1714. {
  1715. struct protection_domain *domain;
  1716. struct iommu_dev_data *dev_data;
  1717. unsigned long flags;
  1718. dev_data = get_dev_data(dev);
  1719. domain = dev_data->domain;
  1720. /* lock device table */
  1721. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  1722. __detach_device(dev_data);
  1723. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  1724. if (domain->flags & PD_IOMMUV2_MASK)
  1725. pdev_iommuv2_disable(to_pci_dev(dev));
  1726. else if (dev_data->ats.enabled)
  1727. pci_disable_ats(to_pci_dev(dev));
  1728. dev_data->ats.enabled = false;
  1729. }
  1730. /*
  1731. * Find out the protection domain structure for a given PCI device. This
  1732. * will give us the pointer to the page table root for example.
  1733. */
  1734. static struct protection_domain *domain_for_device(struct device *dev)
  1735. {
  1736. struct iommu_dev_data *dev_data;
  1737. struct protection_domain *dom = NULL;
  1738. unsigned long flags;
  1739. dev_data = get_dev_data(dev);
  1740. if (dev_data->domain)
  1741. return dev_data->domain;
  1742. if (dev_data->alias_data != NULL) {
  1743. struct iommu_dev_data *alias_data = dev_data->alias_data;
  1744. read_lock_irqsave(&amd_iommu_devtable_lock, flags);
  1745. if (alias_data->domain != NULL) {
  1746. __attach_device(dev_data, alias_data->domain);
  1747. dom = alias_data->domain;
  1748. }
  1749. read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  1750. }
  1751. return dom;
  1752. }
  1753. static int device_change_notifier(struct notifier_block *nb,
  1754. unsigned long action, void *data)
  1755. {
  1756. struct dma_ops_domain *dma_domain;
  1757. struct protection_domain *domain;
  1758. struct iommu_dev_data *dev_data;
  1759. struct device *dev = data;
  1760. struct amd_iommu *iommu;
  1761. unsigned long flags;
  1762. u16 devid;
  1763. if (!check_device(dev))
  1764. return 0;
  1765. devid = get_device_id(dev);
  1766. iommu = amd_iommu_rlookup_table[devid];
  1767. dev_data = get_dev_data(dev);
  1768. switch (action) {
  1769. case BUS_NOTIFY_UNBOUND_DRIVER:
  1770. domain = domain_for_device(dev);
  1771. if (!domain)
  1772. goto out;
  1773. if (dev_data->passthrough)
  1774. break;
  1775. detach_device(dev);
  1776. break;
  1777. case BUS_NOTIFY_ADD_DEVICE:
  1778. iommu_init_device(dev);
  1779. domain = domain_for_device(dev);
  1780. /* allocate a protection domain if a device is added */
  1781. dma_domain = find_protection_domain(devid);
  1782. if (dma_domain)
  1783. goto out;
  1784. dma_domain = dma_ops_domain_alloc();
  1785. if (!dma_domain)
  1786. goto out;
  1787. dma_domain->target_dev = devid;
  1788. spin_lock_irqsave(&iommu_pd_list_lock, flags);
  1789. list_add_tail(&dma_domain->list, &iommu_pd_list);
  1790. spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
  1791. break;
  1792. case BUS_NOTIFY_DEL_DEVICE:
  1793. iommu_uninit_device(dev);
  1794. default:
  1795. goto out;
  1796. }
  1797. iommu_completion_wait(iommu);
  1798. out:
  1799. return 0;
  1800. }
  1801. static struct notifier_block device_nb = {
  1802. .notifier_call = device_change_notifier,
  1803. };
  1804. void amd_iommu_init_notifier(void)
  1805. {
  1806. bus_register_notifier(&pci_bus_type, &device_nb);
  1807. }
  1808. /*****************************************************************************
  1809. *
  1810. * The next functions belong to the dma_ops mapping/unmapping code.
  1811. *
  1812. *****************************************************************************/
  1813. /*
  1814. * In the dma_ops path we only have the struct device. This function
  1815. * finds the corresponding IOMMU, the protection domain and the
  1816. * requestor id for a given device.
  1817. * If the device is not yet associated with a domain this is also done
  1818. * in this function.
  1819. */
  1820. static struct protection_domain *get_domain(struct device *dev)
  1821. {
  1822. struct protection_domain *domain;
  1823. struct dma_ops_domain *dma_dom;
  1824. u16 devid = get_device_id(dev);
  1825. if (!check_device(dev))
  1826. return ERR_PTR(-EINVAL);
  1827. domain = domain_for_device(dev);
  1828. if (domain != NULL && !dma_ops_domain(domain))
  1829. return ERR_PTR(-EBUSY);
  1830. if (domain != NULL)
  1831. return domain;
1832. /* Device not bound yet - bind it */
  1833. dma_dom = find_protection_domain(devid);
  1834. if (!dma_dom)
  1835. dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
  1836. attach_device(dev, &dma_dom->domain);
  1837. DUMP_printk("Using protection domain %d for device %s\n",
  1838. dma_dom->domain.id, dev_name(dev));
  1839. return &dma_dom->domain;
  1840. }
  1841. static void update_device_table(struct protection_domain *domain)
  1842. {
  1843. struct iommu_dev_data *dev_data;
  1844. list_for_each_entry(dev_data, &domain->dev_list, list)
  1845. set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
  1846. }
  1847. static void update_domain(struct protection_domain *domain)
  1848. {
  1849. if (!domain->updated)
  1850. return;
  1851. update_device_table(domain);
  1852. domain_flush_devices(domain);
  1853. domain_flush_tlb_pde(domain);
  1854. domain->updated = false;
  1855. }
  1856. /*
  1857. * This function fetches the PTE for a given address in the aperture
  1858. */
  1859. static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
  1860. unsigned long address)
  1861. {
  1862. struct aperture_range *aperture;
  1863. u64 *pte, *pte_page;
  1864. aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
  1865. if (!aperture)
  1866. return NULL;
  1867. pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
  1868. if (!pte) {
  1869. pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
  1870. GFP_ATOMIC);
  1871. aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
  1872. } else
  1873. pte += PM_LEVEL_INDEX(0, address);
  1874. update_domain(&dom->domain);
  1875. return pte;
  1876. }
  1877. /*
  1878. * This is the generic map function. It maps one 4kb page at paddr to
  1879. * the given address in the DMA address space for the domain.
  1880. */
  1881. static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
  1882. unsigned long address,
  1883. phys_addr_t paddr,
  1884. int direction)
  1885. {
  1886. u64 *pte, __pte;
  1887. WARN_ON(address > dom->aperture_size);
  1888. paddr &= PAGE_MASK;
  1889. pte = dma_ops_get_pte(dom, address);
  1890. if (!pte)
  1891. return DMA_ERROR_CODE;
  1892. __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
  1893. if (direction == DMA_TO_DEVICE)
  1894. __pte |= IOMMU_PTE_IR;
  1895. else if (direction == DMA_FROM_DEVICE)
  1896. __pte |= IOMMU_PTE_IW;
  1897. else if (direction == DMA_BIDIRECTIONAL)
  1898. __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
  1899. WARN_ON(*pte);
  1900. *pte = __pte;
  1901. return (dma_addr_t)address;
  1902. }
  1903. /*
1904. * The generic unmapping function for one page in the DMA address space.
  1905. */
  1906. static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
  1907. unsigned long address)
  1908. {
  1909. struct aperture_range *aperture;
  1910. u64 *pte;
  1911. if (address >= dom->aperture_size)
  1912. return;
  1913. aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
  1914. if (!aperture)
  1915. return;
  1916. pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
  1917. if (!pte)
  1918. return;
  1919. pte += PM_LEVEL_INDEX(0, address);
  1920. WARN_ON(!*pte);
  1921. *pte = 0ULL;
  1922. }
  1923. /*
  1924. * This function contains common code for mapping of a physically
  1925. * contiguous memory region into DMA address space. It is used by all
  1926. * mapping functions provided with this IOMMU driver.
  1927. * Must be called with the domain lock held.
  1928. */
  1929. static dma_addr_t __map_single(struct device *dev,
  1930. struct dma_ops_domain *dma_dom,
  1931. phys_addr_t paddr,
  1932. size_t size,
  1933. int dir,
  1934. bool align,
  1935. u64 dma_mask)
  1936. {
  1937. dma_addr_t offset = paddr & ~PAGE_MASK;
  1938. dma_addr_t address, start, ret;
  1939. unsigned int pages;
  1940. unsigned long align_mask = 0;
  1941. int i;
  1942. pages = iommu_num_pages(paddr, size, PAGE_SIZE);
  1943. paddr &= PAGE_MASK;
  1944. INC_STATS_COUNTER(total_map_requests);
  1945. if (pages > 1)
  1946. INC_STATS_COUNTER(cross_page);
  1947. if (align)
  1948. align_mask = (1UL << get_order(size)) - 1;
  1949. retry:
  1950. address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
  1951. dma_mask);
  1952. if (unlikely(address == DMA_ERROR_CODE)) {
  1953. /*
1954. * Setting next_address here lets the address
1955. * allocator scan only the newly allocated range in the
1956. * first run. This is a small optimization.
  1957. */
  1958. dma_dom->next_address = dma_dom->aperture_size;
  1959. if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
  1960. goto out;
  1961. /*
  1962. * aperture was successfully enlarged by 128 MB, try
  1963. * allocation again
  1964. */
  1965. goto retry;
  1966. }
  1967. start = address;
  1968. for (i = 0; i < pages; ++i) {
  1969. ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
  1970. if (ret == DMA_ERROR_CODE)
  1971. goto out_unmap;
  1972. paddr += PAGE_SIZE;
  1973. start += PAGE_SIZE;
  1974. }
  1975. address += offset;
  1976. ADD_STATS_COUNTER(alloced_io_mem, size);
  1977. if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
  1978. domain_flush_tlb(&dma_dom->domain);
  1979. dma_dom->need_flush = false;
  1980. } else if (unlikely(amd_iommu_np_cache))
  1981. domain_flush_pages(&dma_dom->domain, address, size);
  1982. out:
  1983. return address;
  1984. out_unmap:
  1985. for (--i; i >= 0; --i) {
  1986. start -= PAGE_SIZE;
  1987. dma_ops_domain_unmap(dma_dom, start);
  1988. }
  1989. dma_ops_free_addresses(dma_dom, address, pages);
  1990. return DMA_ERROR_CODE;
  1991. }
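/*
 * Illustrative note (not part of the driver): __map_single() keeps the
 * sub-page offset of 'paddr' and only allocates whole IOMMU pages. For
 * example, mapping paddr 0x12345678 with size 0xa00 gives
 *
 *   offset = 0x678
 *   pages  = iommu_num_pages(0x12345678, 0xa00, PAGE_SIZE) = 2
 *
 * because 0x678 + 0xa00 crosses a 4 KiB boundary (this is what the
 * cross_page statistics counter tracks). The returned DMA address is the
 * start of the allocated aperture region plus 0x678.
 */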
  1992. /*
  1993. * Does the reverse of the __map_single function. Must be called with
  1994. * the domain lock held too
  1995. */
  1996. static void __unmap_single(struct dma_ops_domain *dma_dom,
  1997. dma_addr_t dma_addr,
  1998. size_t size,
  1999. int dir)
  2000. {
  2001. dma_addr_t flush_addr;
  2002. dma_addr_t i, start;
  2003. unsigned int pages;
  2004. if ((dma_addr == DMA_ERROR_CODE) ||
  2005. (dma_addr + size > dma_dom->aperture_size))
  2006. return;
  2007. flush_addr = dma_addr;
  2008. pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
  2009. dma_addr &= PAGE_MASK;
  2010. start = dma_addr;
  2011. for (i = 0; i < pages; ++i) {
  2012. dma_ops_domain_unmap(dma_dom, start);
  2013. start += PAGE_SIZE;
  2014. }
  2015. SUB_STATS_COUNTER(alloced_io_mem, size);
  2016. dma_ops_free_addresses(dma_dom, dma_addr, pages);
  2017. if (amd_iommu_unmap_flush || dma_dom->need_flush) {
  2018. domain_flush_pages(&dma_dom->domain, flush_addr, size);
  2019. dma_dom->need_flush = false;
  2020. }
  2021. }
  2022. /*
  2023. * The exported map_single function for dma_ops.
  2024. */
  2025. static dma_addr_t map_page(struct device *dev, struct page *page,
  2026. unsigned long offset, size_t size,
  2027. enum dma_data_direction dir,
  2028. struct dma_attrs *attrs)
  2029. {
  2030. unsigned long flags;
  2031. struct protection_domain *domain;
  2032. dma_addr_t addr;
  2033. u64 dma_mask;
  2034. phys_addr_t paddr = page_to_phys(page) + offset;
  2035. INC_STATS_COUNTER(cnt_map_single);
  2036. domain = get_domain(dev);
  2037. if (PTR_ERR(domain) == -EINVAL)
  2038. return (dma_addr_t)paddr;
  2039. else if (IS_ERR(domain))
  2040. return DMA_ERROR_CODE;
  2041. dma_mask = *dev->dma_mask;
  2042. spin_lock_irqsave(&domain->lock, flags);
  2043. addr = __map_single(dev, domain->priv, paddr, size, dir, false,
  2044. dma_mask);
  2045. if (addr == DMA_ERROR_CODE)
  2046. goto out;
  2047. domain_flush_complete(domain);
  2048. out:
  2049. spin_unlock_irqrestore(&domain->lock, flags);
  2050. return addr;
  2051. }
  2052. /*
  2053. * The exported unmap_single function for dma_ops.
  2054. */
  2055. static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
  2056. enum dma_data_direction dir, struct dma_attrs *attrs)
  2057. {
  2058. unsigned long flags;
  2059. struct protection_domain *domain;
  2060. INC_STATS_COUNTER(cnt_unmap_single);
  2061. domain = get_domain(dev);
  2062. if (IS_ERR(domain))
  2063. return;
  2064. spin_lock_irqsave(&domain->lock, flags);
  2065. __unmap_single(domain->priv, dma_addr, size, dir);
  2066. domain_flush_complete(domain);
  2067. spin_unlock_irqrestore(&domain->lock, flags);
  2068. }
  2069. /*
2070. * This is a special map_sg function which is used if we have to map a
2071. * device which is not handled by an AMD IOMMU in the system.
  2072. */
  2073. static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
  2074. int nelems, int dir)
  2075. {
  2076. struct scatterlist *s;
  2077. int i;
  2078. for_each_sg(sglist, s, nelems, i) {
  2079. s->dma_address = (dma_addr_t)sg_phys(s);
  2080. s->dma_length = s->length;
  2081. }
  2082. return nelems;
  2083. }
  2084. /*
  2085. * The exported map_sg function for dma_ops (handles scatter-gather
  2086. * lists).
  2087. */
  2088. static int map_sg(struct device *dev, struct scatterlist *sglist,
  2089. int nelems, enum dma_data_direction dir,
  2090. struct dma_attrs *attrs)
  2091. {
  2092. unsigned long flags;
  2093. struct protection_domain *domain;
  2094. int i;
  2095. struct scatterlist *s;
  2096. phys_addr_t paddr;
  2097. int mapped_elems = 0;
  2098. u64 dma_mask;
  2099. INC_STATS_COUNTER(cnt_map_sg);
  2100. domain = get_domain(dev);
  2101. if (PTR_ERR(domain) == -EINVAL)
  2102. return map_sg_no_iommu(dev, sglist, nelems, dir);
  2103. else if (IS_ERR(domain))
  2104. return 0;
  2105. dma_mask = *dev->dma_mask;
  2106. spin_lock_irqsave(&domain->lock, flags);
  2107. for_each_sg(sglist, s, nelems, i) {
  2108. paddr = sg_phys(s);
  2109. s->dma_address = __map_single(dev, domain->priv,
  2110. paddr, s->length, dir, false,
  2111. dma_mask);
  2112. if (s->dma_address) {
  2113. s->dma_length = s->length;
  2114. mapped_elems++;
  2115. } else
  2116. goto unmap;
  2117. }
  2118. domain_flush_complete(domain);
  2119. out:
  2120. spin_unlock_irqrestore(&domain->lock, flags);
  2121. return mapped_elems;
  2122. unmap:
  2123. for_each_sg(sglist, s, mapped_elems, i) {
  2124. if (s->dma_address)
  2125. __unmap_single(domain->priv, s->dma_address,
  2126. s->dma_length, dir);
  2127. s->dma_address = s->dma_length = 0;
  2128. }
  2129. mapped_elems = 0;
  2130. goto out;
  2131. }
  2132. /*
2133. * The exported unmap_sg function for dma_ops (handles scatter-gather
2134. * lists).
  2135. */
  2136. static void unmap_sg(struct device *dev, struct scatterlist *sglist,
  2137. int nelems, enum dma_data_direction dir,
  2138. struct dma_attrs *attrs)
  2139. {
  2140. unsigned long flags;
  2141. struct protection_domain *domain;
  2142. struct scatterlist *s;
  2143. int i;
  2144. INC_STATS_COUNTER(cnt_unmap_sg);
  2145. domain = get_domain(dev);
  2146. if (IS_ERR(domain))
  2147. return;
  2148. spin_lock_irqsave(&domain->lock, flags);
  2149. for_each_sg(sglist, s, nelems, i) {
  2150. __unmap_single(domain->priv, s->dma_address,
  2151. s->dma_length, dir);
  2152. s->dma_address = s->dma_length = 0;
  2153. }
  2154. domain_flush_complete(domain);
  2155. spin_unlock_irqrestore(&domain->lock, flags);
  2156. }
  2157. /*
  2158. * The exported alloc_coherent function for dma_ops.
  2159. */
  2160. static void *alloc_coherent(struct device *dev, size_t size,
  2161. dma_addr_t *dma_addr, gfp_t flag,
  2162. struct dma_attrs *attrs)
  2163. {
  2164. unsigned long flags;
  2165. void *virt_addr;
  2166. struct protection_domain *domain;
  2167. phys_addr_t paddr;
  2168. u64 dma_mask = dev->coherent_dma_mask;
  2169. INC_STATS_COUNTER(cnt_alloc_coherent);
  2170. domain = get_domain(dev);
  2171. if (PTR_ERR(domain) == -EINVAL) {
  2172. virt_addr = (void *)__get_free_pages(flag, get_order(size));
  2173. *dma_addr = __pa(virt_addr);
  2174. return virt_addr;
  2175. } else if (IS_ERR(domain))
  2176. return NULL;
  2177. dma_mask = dev->coherent_dma_mask;
  2178. flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
  2179. flag |= __GFP_ZERO;
  2180. virt_addr = (void *)__get_free_pages(flag, get_order(size));
  2181. if (!virt_addr)
  2182. return NULL;
  2183. paddr = virt_to_phys(virt_addr);
  2184. if (!dma_mask)
  2185. dma_mask = *dev->dma_mask;
  2186. spin_lock_irqsave(&domain->lock, flags);
  2187. *dma_addr = __map_single(dev, domain->priv, paddr,
  2188. size, DMA_BIDIRECTIONAL, true, dma_mask);
  2189. if (*dma_addr == DMA_ERROR_CODE) {
  2190. spin_unlock_irqrestore(&domain->lock, flags);
  2191. goto out_free;
  2192. }
  2193. domain_flush_complete(domain);
  2194. spin_unlock_irqrestore(&domain->lock, flags);
  2195. return virt_addr;
  2196. out_free:
  2197. free_pages((unsigned long)virt_addr, get_order(size));
  2198. return NULL;
  2199. }
  2200. /*
  2201. * The exported free_coherent function for dma_ops.
  2202. */
  2203. static void free_coherent(struct device *dev, size_t size,
  2204. void *virt_addr, dma_addr_t dma_addr,
  2205. struct dma_attrs *attrs)
  2206. {
  2207. unsigned long flags;
  2208. struct protection_domain *domain;
  2209. INC_STATS_COUNTER(cnt_free_coherent);
  2210. domain = get_domain(dev);
  2211. if (IS_ERR(domain))
  2212. goto free_mem;
  2213. spin_lock_irqsave(&domain->lock, flags);
  2214. __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
  2215. domain_flush_complete(domain);
  2216. spin_unlock_irqrestore(&domain->lock, flags);
  2217. free_mem:
  2218. free_pages((unsigned long)virt_addr, get_order(size));
  2219. }
  2220. /*
  2221. * This function is called by the DMA layer to find out if we can handle a
  2222. * particular device. It is part of the dma_ops.
  2223. */
  2224. static int amd_iommu_dma_supported(struct device *dev, u64 mask)
  2225. {
  2226. return check_device(dev);
  2227. }
  2228. /*
  2229. * The function for pre-allocating protection domains.
  2230. *
2231. * Once the driver core informs the DMA layer when a driver grabs a device,
2232. * we won't need to preallocate the protection domains anymore.
  2233. * For now we have to.
  2234. */
  2235. static void __init prealloc_protection_domains(void)
  2236. {
  2237. struct iommu_dev_data *dev_data;
  2238. struct dma_ops_domain *dma_dom;
  2239. struct pci_dev *dev = NULL;
  2240. u16 devid;
  2241. for_each_pci_dev(dev) {
  2242. /* Do we handle this device? */
  2243. if (!check_device(&dev->dev))
  2244. continue;
  2245. dev_data = get_dev_data(&dev->dev);
  2246. if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
  2247. /* Make sure passthrough domain is allocated */
  2248. alloc_passthrough_domain();
  2249. dev_data->passthrough = true;
  2250. attach_device(&dev->dev, pt_domain);
  2251. pr_info("AMD-Vi: Using passthough domain for device %s\n",
  2252. dev_name(&dev->dev));
  2253. }
  2254. /* Is there already any domain for it? */
  2255. if (domain_for_device(&dev->dev))
  2256. continue;
  2257. devid = get_device_id(&dev->dev);
  2258. dma_dom = dma_ops_domain_alloc();
  2259. if (!dma_dom)
  2260. continue;
  2261. init_unity_mappings_for_device(dma_dom, devid);
  2262. dma_dom->target_dev = devid;
  2263. attach_device(&dev->dev, &dma_dom->domain);
  2264. list_add_tail(&dma_dom->list, &iommu_pd_list);
  2265. }
  2266. }
  2267. static struct dma_map_ops amd_iommu_dma_ops = {
  2268. .alloc = alloc_coherent,
  2269. .free = free_coherent,
  2270. .map_page = map_page,
  2271. .unmap_page = unmap_page,
  2272. .map_sg = map_sg,
  2273. .unmap_sg = unmap_sg,
  2274. .dma_supported = amd_iommu_dma_supported,
  2275. };
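/*
 * Illustrative sketch (not part of the driver): once device_dma_ops_init()
 * has pointed dev->archdata.dma_ops at amd_iommu_dma_ops, an ordinary PCI
 * driver reaches map_page()/unmap_page() above through the generic DMA API.
 * The function and buffer names below are made up for illustration.
 */
#if 0
static int example_dma(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* ends up in map_page() via the dma_map_ops of this file */
	dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		return -ENOMEM;

	/* ... program the device with 'dma' and start the transfer ... */

	/* ends up in unmap_page() */
	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);

	return 0;
}
#endif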
  2276. static unsigned device_dma_ops_init(void)
  2277. {
  2278. struct iommu_dev_data *dev_data;
  2279. struct pci_dev *pdev = NULL;
  2280. unsigned unhandled = 0;
  2281. for_each_pci_dev(pdev) {
  2282. if (!check_device(&pdev->dev)) {
  2283. iommu_ignore_device(&pdev->dev);
  2284. unhandled += 1;
  2285. continue;
  2286. }
  2287. dev_data = get_dev_data(&pdev->dev);
  2288. if (!dev_data->passthrough)
  2289. pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
  2290. else
  2291. pdev->dev.archdata.dma_ops = &nommu_dma_ops;
  2292. }
  2293. return unhandled;
  2294. }
  2295. /*
2296. * The function which glues the AMD IOMMU driver into dma_ops.
  2297. */
  2298. void __init amd_iommu_init_api(void)
  2299. {
  2300. bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
  2301. }
  2302. int __init amd_iommu_init_dma_ops(void)
  2303. {
  2304. struct amd_iommu *iommu;
  2305. int ret, unhandled;
  2306. /*
  2307. * first allocate a default protection domain for every IOMMU we
  2308. * found in the system. Devices not assigned to any other
  2309. * protection domain will be assigned to the default one.
  2310. */
  2311. for_each_iommu(iommu) {
  2312. iommu->default_dom = dma_ops_domain_alloc();
  2313. if (iommu->default_dom == NULL)
  2314. return -ENOMEM;
  2315. iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
  2316. ret = iommu_init_unity_mappings(iommu);
  2317. if (ret)
  2318. goto free_domains;
  2319. }
  2320. /*
  2321. * Pre-allocate the protection domains for each device.
  2322. */
  2323. prealloc_protection_domains();
  2324. iommu_detected = 1;
  2325. swiotlb = 0;
2326. /* Finally make our dma_ops visible to the device drivers */
  2327. unhandled = device_dma_ops_init();
  2328. if (unhandled && max_pfn > MAX_DMA32_PFN) {
  2329. /* There are unhandled devices - initialize swiotlb for them */
  2330. swiotlb = 1;
  2331. }
  2332. amd_iommu_stats_init();
  2333. return 0;
  2334. free_domains:
  2335. for_each_iommu(iommu) {
  2336. if (iommu->default_dom)
  2337. dma_ops_domain_free(iommu->default_dom);
  2338. }
  2339. return ret;
  2340. }
  2341. /*****************************************************************************
  2342. *
  2343. * The following functions belong to the exported interface of AMD IOMMU
  2344. *
  2345. * This interface allows access to lower level functions of the IOMMU
2346. * like protection domain handling and assignment of devices to domains
  2347. * which is not possible with the dma_ops interface.
  2348. *
  2349. *****************************************************************************/
  2350. static void cleanup_domain(struct protection_domain *domain)
  2351. {
  2352. struct iommu_dev_data *dev_data, *next;
  2353. unsigned long flags;
  2354. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  2355. list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
  2356. __detach_device(dev_data);
  2357. atomic_set(&dev_data->bind, 0);
  2358. }
  2359. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  2360. }
  2361. static void protection_domain_free(struct protection_domain *domain)
  2362. {
  2363. if (!domain)
  2364. return;
  2365. del_domain_from_list(domain);
  2366. if (domain->id)
  2367. domain_id_free(domain->id);
  2368. kfree(domain);
  2369. }
  2370. static struct protection_domain *protection_domain_alloc(void)
  2371. {
  2372. struct protection_domain *domain;
  2373. domain = kzalloc(sizeof(*domain), GFP_KERNEL);
  2374. if (!domain)
  2375. return NULL;
  2376. spin_lock_init(&domain->lock);
  2377. mutex_init(&domain->api_lock);
  2378. domain->id = domain_id_alloc();
  2379. if (!domain->id)
  2380. goto out_err;
  2381. INIT_LIST_HEAD(&domain->dev_list);
  2382. add_domain_to_list(domain);
  2383. return domain;
  2384. out_err:
  2385. kfree(domain);
  2386. return NULL;
  2387. }
  2388. static int __init alloc_passthrough_domain(void)
  2389. {
  2390. if (pt_domain != NULL)
  2391. return 0;
  2392. /* allocate passthrough domain */
  2393. pt_domain = protection_domain_alloc();
  2394. if (!pt_domain)
  2395. return -ENOMEM;
  2396. pt_domain->mode = PAGE_MODE_NONE;
  2397. return 0;
  2398. }
  2399. static int amd_iommu_domain_init(struct iommu_domain *dom)
  2400. {
  2401. struct protection_domain *domain;
  2402. domain = protection_domain_alloc();
  2403. if (!domain)
  2404. goto out_free;
  2405. domain->mode = PAGE_MODE_3_LEVEL;
  2406. domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
  2407. if (!domain->pt_root)
  2408. goto out_free;
  2409. domain->iommu_domain = dom;
  2410. dom->priv = domain;
  2411. return 0;
  2412. out_free:
  2413. protection_domain_free(domain);
  2414. return -ENOMEM;
  2415. }
  2416. static void amd_iommu_domain_destroy(struct iommu_domain *dom)
  2417. {
  2418. struct protection_domain *domain = dom->priv;
  2419. if (!domain)
  2420. return;
  2421. if (domain->dev_cnt > 0)
  2422. cleanup_domain(domain);
  2423. BUG_ON(domain->dev_cnt != 0);
  2424. if (domain->mode != PAGE_MODE_NONE)
  2425. free_pagetable(domain);
  2426. if (domain->flags & PD_IOMMUV2_MASK)
  2427. free_gcr3_table(domain);
  2428. protection_domain_free(domain);
  2429. dom->priv = NULL;
  2430. }
  2431. static void amd_iommu_detach_device(struct iommu_domain *dom,
  2432. struct device *dev)
  2433. {
  2434. struct iommu_dev_data *dev_data = dev->archdata.iommu;
  2435. struct amd_iommu *iommu;
  2436. u16 devid;
  2437. if (!check_device(dev))
  2438. return;
  2439. devid = get_device_id(dev);
  2440. if (dev_data->domain != NULL)
  2441. detach_device(dev);
  2442. iommu = amd_iommu_rlookup_table[devid];
  2443. if (!iommu)
  2444. return;
  2445. iommu_completion_wait(iommu);
  2446. }
  2447. static int amd_iommu_attach_device(struct iommu_domain *dom,
  2448. struct device *dev)
  2449. {
  2450. struct protection_domain *domain = dom->priv;
  2451. struct iommu_dev_data *dev_data;
  2452. struct amd_iommu *iommu;
  2453. int ret;
  2454. if (!check_device(dev))
  2455. return -EINVAL;
  2456. dev_data = dev->archdata.iommu;
  2457. iommu = amd_iommu_rlookup_table[dev_data->devid];
  2458. if (!iommu)
  2459. return -EINVAL;
  2460. if (dev_data->domain)
  2461. detach_device(dev);
  2462. ret = attach_device(dev, domain);
  2463. iommu_completion_wait(iommu);
  2464. return ret;
  2465. }
  2466. static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
  2467. phys_addr_t paddr, size_t page_size, int iommu_prot)
  2468. {
  2469. struct protection_domain *domain = dom->priv;
  2470. int prot = 0;
  2471. int ret;
  2472. if (domain->mode == PAGE_MODE_NONE)
  2473. return -EINVAL;
  2474. if (iommu_prot & IOMMU_READ)
  2475. prot |= IOMMU_PROT_IR;
  2476. if (iommu_prot & IOMMU_WRITE)
  2477. prot |= IOMMU_PROT_IW;
  2478. mutex_lock(&domain->api_lock);
  2479. ret = iommu_map_page(domain, iova, paddr, prot, page_size);
  2480. mutex_unlock(&domain->api_lock);
  2481. return ret;
  2482. }
  2483. static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
  2484. size_t page_size)
  2485. {
  2486. struct protection_domain *domain = dom->priv;
  2487. size_t unmap_size;
  2488. if (domain->mode == PAGE_MODE_NONE)
  2489. return -EINVAL;
  2490. mutex_lock(&domain->api_lock);
  2491. unmap_size = iommu_unmap_page(domain, iova, page_size);
  2492. mutex_unlock(&domain->api_lock);
  2493. domain_flush_tlb_pde(domain);
  2494. return unmap_size;
  2495. }
  2496. static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
  2497. unsigned long iova)
  2498. {
  2499. struct protection_domain *domain = dom->priv;
  2500. unsigned long offset_mask;
  2501. phys_addr_t paddr;
  2502. u64 *pte, __pte;
  2503. if (domain->mode == PAGE_MODE_NONE)
  2504. return iova;
  2505. pte = fetch_pte(domain, iova);
  2506. if (!pte || !IOMMU_PTE_PRESENT(*pte))
  2507. return 0;
  2508. if (PM_PTE_LEVEL(*pte) == 0)
  2509. offset_mask = PAGE_SIZE - 1;
  2510. else
  2511. offset_mask = PTE_PAGE_SIZE(*pte) - 1;
  2512. __pte = *pte & PM_ADDR_MASK;
  2513. paddr = (__pte & ~offset_mask) | (iova & offset_mask);
  2514. return paddr;
  2515. }
  2516. static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
  2517. unsigned long cap)
  2518. {
  2519. switch (cap) {
  2520. case IOMMU_CAP_CACHE_COHERENCY:
  2521. return 1;
  2522. }
  2523. return 0;
  2524. }
  2525. static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
  2526. {
  2527. struct iommu_dev_data *dev_data = dev->archdata.iommu;
  2528. struct pci_dev *pdev = to_pci_dev(dev);
  2529. u16 devid;
  2530. if (!dev_data)
  2531. return -ENODEV;
  2532. if (pdev->is_virtfn || !iommu_group_mf)
  2533. devid = dev_data->devid;
  2534. else
  2535. devid = calc_devid(pdev->bus->number,
  2536. PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
  2537. *groupid = amd_iommu_alias_table[devid];
  2538. return 0;
  2539. }
  2540. static struct iommu_ops amd_iommu_ops = {
  2541. .domain_init = amd_iommu_domain_init,
  2542. .domain_destroy = amd_iommu_domain_destroy,
  2543. .attach_dev = amd_iommu_attach_device,
  2544. .detach_dev = amd_iommu_detach_device,
  2545. .map = amd_iommu_map,
  2546. .unmap = amd_iommu_unmap,
  2547. .iova_to_phys = amd_iommu_iova_to_phys,
  2548. .domain_has_cap = amd_iommu_domain_has_cap,
  2549. .device_group = amd_iommu_device_group,
  2550. .pgsize_bitmap = AMD_IOMMU_PGSIZES,
  2551. };
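/*
 * Illustrative sketch (not part of the driver): the callbacks above are
 * reached through the generic IOMMU API. A hypothetical user (e.g. a device
 * assignment framework) would do roughly the following; all names apart
 * from the generic iommu_* functions are made up.
 */
#if 0
static int example_iommu_api_user(struct device *dev)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc(&pci_bus_type);	/* -> amd_iommu_domain_init() */
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, dev);		/* -> amd_iommu_attach_device() */
	if (ret)
		goto out_free;

	/* map 2 MB at IOVA 0x100000 -> amd_iommu_map() */
	ret = iommu_map(dom, 0x100000, 0x80000000, 0x200000,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... use the mapping ... */

	iommu_unmap(dom, 0x100000, 0x200000);		/* -> amd_iommu_unmap() */
out_detach:
	iommu_detach_device(dom, dev);			/* -> amd_iommu_detach_device() */
out_free:
	iommu_domain_free(dom);				/* -> amd_iommu_domain_destroy() */
	return ret;
}
#endif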
  2552. /*****************************************************************************
  2553. *
  2554. * The next functions do a basic initialization of IOMMU for pass through
  2555. * mode
  2556. *
  2557. * In passthrough mode the IOMMU is initialized and enabled but not used for
  2558. * DMA-API translation.
  2559. *
  2560. *****************************************************************************/
  2561. int __init amd_iommu_init_passthrough(void)
  2562. {
  2563. struct iommu_dev_data *dev_data;
  2564. struct pci_dev *dev = NULL;
  2565. struct amd_iommu *iommu;
  2566. u16 devid;
  2567. int ret;
  2568. ret = alloc_passthrough_domain();
  2569. if (ret)
  2570. return ret;
  2571. for_each_pci_dev(dev) {
  2572. if (!check_device(&dev->dev))
  2573. continue;
  2574. dev_data = get_dev_data(&dev->dev);
  2575. dev_data->passthrough = true;
  2576. devid = get_device_id(&dev->dev);
  2577. iommu = amd_iommu_rlookup_table[devid];
  2578. if (!iommu)
  2579. continue;
  2580. attach_device(&dev->dev, pt_domain);
  2581. }
  2582. amd_iommu_stats_init();
  2583. pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
  2584. return 0;
  2585. }
  2586. /* IOMMUv2 specific functions */
  2587. int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
  2588. {
  2589. return atomic_notifier_chain_register(&ppr_notifier, nb);
  2590. }
  2591. EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
  2592. int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
  2593. {
  2594. return atomic_notifier_chain_unregister(&ppr_notifier, nb);
  2595. }
  2596. EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
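/*
 * Illustrative sketch (not part of the driver): the PPR notifier chain is
 * consumed by the IOMMUv2 support code, which registers a handler for
 * peripheral page requests roughly like this. The handler below is a
 * made-up stub; the real consumer resolves the fault and answers it via
 * amd_iommu_complete_ppr().
 */
#if 0
static int example_ppr_handler(struct notifier_block *nb,
			       unsigned long e, void *data)
{
	/* 'data' describes the peripheral page request reported by the IOMMU */
	return NOTIFY_OK;
}

static struct notifier_block example_ppr_nb = {
	.notifier_call = example_ppr_handler,
};

/* somewhere in the consumer's init path: */
/*	amd_iommu_register_ppr_notifier(&example_ppr_nb);	*/
#endif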
  2597. void amd_iommu_domain_direct_map(struct iommu_domain *dom)
  2598. {
  2599. struct protection_domain *domain = dom->priv;
  2600. unsigned long flags;
  2601. spin_lock_irqsave(&domain->lock, flags);
  2602. /* Update data structure */
  2603. domain->mode = PAGE_MODE_NONE;
  2604. domain->updated = true;
  2605. /* Make changes visible to IOMMUs */
  2606. update_domain(domain);
  2607. /* Page-table is not visible to IOMMU anymore, so free it */
  2608. free_pagetable(domain);
  2609. spin_unlock_irqrestore(&domain->lock, flags);
  2610. }
  2611. EXPORT_SYMBOL(amd_iommu_domain_direct_map);
  2612. int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
  2613. {
  2614. struct protection_domain *domain = dom->priv;
  2615. unsigned long flags;
  2616. int levels, ret;
  2617. if (pasids <= 0 || pasids > (PASID_MASK + 1))
  2618. return -EINVAL;
  2619. /* Number of GCR3 table levels required */
  2620. for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
  2621. levels += 1;
  2622. if (levels > amd_iommu_max_glx_val)
  2623. return -EINVAL;
  2624. spin_lock_irqsave(&domain->lock, flags);
  2625. /*
2626. * Save ourselves all the sanity checks of whether the devices already in
2627. * the domain support IOMMUv2. Just force that the domain has no
  2628. * devices attached when it is switched into IOMMUv2 mode.
  2629. */
  2630. ret = -EBUSY;
  2631. if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
  2632. goto out;
  2633. ret = -ENOMEM;
  2634. domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
  2635. if (domain->gcr3_tbl == NULL)
  2636. goto out;
  2637. domain->glx = levels;
  2638. domain->flags |= PD_IOMMUV2_MASK;
  2639. domain->updated = true;
  2640. update_domain(domain);
  2641. ret = 0;
  2642. out:
  2643. spin_unlock_irqrestore(&domain->lock, flags);
  2644. return ret;
  2645. }
  2646. EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
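/*
 * Illustrative sketch (not part of the driver): a consumer of the IOMMUv2
 * interface (such as the amd_iommu_v2 layer) would switch a domain into
 * IOMMUv2 mode roughly as follows before binding address spaces to PASIDs
 * with amd_iommu_domain_set_gcr3(). The ordering follows the constraints
 * visible above (no devices attached when enabling v2, PAGE_MODE_NONE for
 * GCR3 updates); names and error handling are simplified for illustration.
 */
#if 0
static int example_enable_v2(struct iommu_domain *dom, struct device *dev)
{
	int ret;

	/* drop the host page-table; the device will use PASID translation */
	amd_iommu_domain_direct_map(dom);

	/* reserve GCR3 table levels for up to 2^16 PASIDs */
	ret = amd_iommu_domain_enable_v2(dom, 1 << 16);
	if (ret)
		return ret;

	ret = iommu_attach_device(dom, dev);
	if (ret)
		return ret;

	/* later, per PASID: amd_iommu_domain_set_gcr3(dom, pasid, cr3) */
	return 0;
}
#endif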
  2647. static int __flush_pasid(struct protection_domain *domain, int pasid,
  2648. u64 address, bool size)
  2649. {
  2650. struct iommu_dev_data *dev_data;
  2651. struct iommu_cmd cmd;
  2652. int i, ret;
  2653. if (!(domain->flags & PD_IOMMUV2_MASK))
  2654. return -EINVAL;
  2655. build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
  2656. /*
  2657. * IOMMU TLB needs to be flushed before Device TLB to
  2658. * prevent device TLB refill from IOMMU TLB
  2659. */
  2660. for (i = 0; i < amd_iommus_present; ++i) {
  2661. if (domain->dev_iommu[i] == 0)
  2662. continue;
  2663. ret = iommu_queue_command(amd_iommus[i], &cmd);
  2664. if (ret != 0)
  2665. goto out;
  2666. }
  2667. /* Wait until IOMMU TLB flushes are complete */
  2668. domain_flush_complete(domain);
  2669. /* Now flush device TLBs */
  2670. list_for_each_entry(dev_data, &domain->dev_list, list) {
  2671. struct amd_iommu *iommu;
  2672. int qdep;
  2673. BUG_ON(!dev_data->ats.enabled);
  2674. qdep = dev_data->ats.qdep;
  2675. iommu = amd_iommu_rlookup_table[dev_data->devid];
  2676. build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
  2677. qdep, address, size);
  2678. ret = iommu_queue_command(iommu, &cmd);
  2679. if (ret != 0)
  2680. goto out;
  2681. }
  2682. /* Wait until all device TLBs are flushed */
  2683. domain_flush_complete(domain);
  2684. ret = 0;
  2685. out:
  2686. return ret;
  2687. }
  2688. static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
  2689. u64 address)
  2690. {
  2691. INC_STATS_COUNTER(invalidate_iotlb);
  2692. return __flush_pasid(domain, pasid, address, false);
  2693. }
  2694. int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
  2695. u64 address)
  2696. {
  2697. struct protection_domain *domain = dom->priv;
  2698. unsigned long flags;
  2699. int ret;
  2700. spin_lock_irqsave(&domain->lock, flags);
  2701. ret = __amd_iommu_flush_page(domain, pasid, address);
  2702. spin_unlock_irqrestore(&domain->lock, flags);
  2703. return ret;
  2704. }
  2705. EXPORT_SYMBOL(amd_iommu_flush_page);
  2706. static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
  2707. {
  2708. INC_STATS_COUNTER(invalidate_iotlb_all);
  2709. return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
  2710. true);
  2711. }
  2712. int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
  2713. {
  2714. struct protection_domain *domain = dom->priv;
  2715. unsigned long flags;
  2716. int ret;
  2717. spin_lock_irqsave(&domain->lock, flags);
  2718. ret = __amd_iommu_flush_tlb(domain, pasid);
  2719. spin_unlock_irqrestore(&domain->lock, flags);
  2720. return ret;
  2721. }
  2722. EXPORT_SYMBOL(amd_iommu_flush_tlb);
  2723. static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
  2724. {
  2725. int index;
  2726. u64 *pte;
  2727. while (true) {
  2728. index = (pasid >> (9 * level)) & 0x1ff;
  2729. pte = &root[index];
  2730. if (level == 0)
  2731. break;
  2732. if (!(*pte & GCR3_VALID)) {
  2733. if (!alloc)
  2734. return NULL;
  2735. root = (void *)get_zeroed_page(GFP_ATOMIC);
  2736. if (root == NULL)
  2737. return NULL;
  2738. *pte = __pa(root) | GCR3_VALID;
  2739. }
  2740. root = __va(*pte & PAGE_MASK);
  2741. level -= 1;
  2742. }
  2743. return pte;
  2744. }
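/*
 * Illustrative note (not part of the driver): the GCR3 table is walked like
 * a radix tree with 9 bits of the PASID per level. With domain->glx == 1
 * (two levels), PASID 0x1234 is resolved as
 *
 *   level 1 index = (0x1234 >> 9) & 0x1ff = 9
 *   level 0 index =  0x1234       & 0x1ff = 0x34
 *
 * and the level-0 entry finally holds the guest CR3 value installed by
 * __set_gcr3() below.
 */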
  2745. static int __set_gcr3(struct protection_domain *domain, int pasid,
  2746. unsigned long cr3)
  2747. {
  2748. u64 *pte;
  2749. if (domain->mode != PAGE_MODE_NONE)
  2750. return -EINVAL;
  2751. pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
  2752. if (pte == NULL)
  2753. return -ENOMEM;
  2754. *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
  2755. return __amd_iommu_flush_tlb(domain, pasid);
  2756. }
  2757. static int __clear_gcr3(struct protection_domain *domain, int pasid)
  2758. {
  2759. u64 *pte;
  2760. if (domain->mode != PAGE_MODE_NONE)
  2761. return -EINVAL;
  2762. pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
  2763. if (pte == NULL)
  2764. return 0;
  2765. *pte = 0;
  2766. return __amd_iommu_flush_tlb(domain, pasid);
  2767. }
  2768. int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
  2769. unsigned long cr3)
  2770. {
  2771. struct protection_domain *domain = dom->priv;
  2772. unsigned long flags;
  2773. int ret;
  2774. spin_lock_irqsave(&domain->lock, flags);
  2775. ret = __set_gcr3(domain, pasid, cr3);
  2776. spin_unlock_irqrestore(&domain->lock, flags);
  2777. return ret;
  2778. }
  2779. EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
  2780. int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
  2781. {
  2782. struct protection_domain *domain = dom->priv;
  2783. unsigned long flags;
  2784. int ret;
  2785. spin_lock_irqsave(&domain->lock, flags);
  2786. ret = __clear_gcr3(domain, pasid);
  2787. spin_unlock_irqrestore(&domain->lock, flags);
  2788. return ret;
  2789. }
  2790. EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
  2791. int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
  2792. int status, int tag)
  2793. {
  2794. struct iommu_dev_data *dev_data;
  2795. struct amd_iommu *iommu;
  2796. struct iommu_cmd cmd;
  2797. INC_STATS_COUNTER(complete_ppr);
  2798. dev_data = get_dev_data(&pdev->dev);
  2799. iommu = amd_iommu_rlookup_table[dev_data->devid];
  2800. build_complete_ppr(&cmd, dev_data->devid, pasid, status,
  2801. tag, dev_data->pri_tlp);
  2802. return iommu_queue_command(iommu, &cmd);
  2803. }
  2804. EXPORT_SYMBOL(amd_iommu_complete_ppr);
  2805. struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
  2806. {
  2807. struct protection_domain *domain;
  2808. domain = get_domain(&pdev->dev);
  2809. if (IS_ERR(domain))
  2810. return NULL;
  2811. /* Only return IOMMUv2 domains */
  2812. if (!(domain->flags & PD_IOMMUV2_MASK))
  2813. return NULL;
  2814. return domain->iommu_domain;
  2815. }
  2816. EXPORT_SYMBOL(amd_iommu_get_v2_domain);
  2817. void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
  2818. {
  2819. struct iommu_dev_data *dev_data;
  2820. if (!amd_iommu_v2_supported())
  2821. return;
  2822. dev_data = get_dev_data(&pdev->dev);
  2823. dev_data->errata |= (1 << erratum);
  2824. }
  2825. EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
  2826. int amd_iommu_device_info(struct pci_dev *pdev,
  2827. struct amd_iommu_device_info *info)
  2828. {
  2829. int max_pasids;
  2830. int pos;
  2831. if (pdev == NULL || info == NULL)
  2832. return -EINVAL;
  2833. if (!amd_iommu_v2_supported())
  2834. return -EINVAL;
  2835. memset(info, 0, sizeof(*info));
  2836. pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
  2837. if (pos)
  2838. info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
  2839. pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
  2840. if (pos)
  2841. info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
  2842. pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
  2843. if (pos) {
  2844. int features;
  2845. max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
  2846. max_pasids = min(max_pasids, (1 << 20));
  2847. info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
  2848. info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
  2849. features = pci_pasid_features(pdev);
  2850. if (features & PCI_PASID_CAP_EXEC)
  2851. info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
  2852. if (features & PCI_PASID_CAP_PRIV)
  2853. info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
  2854. }
  2855. return 0;
  2856. }
  2857. EXPORT_SYMBOL(amd_iommu_device_info);
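/*
 * Illustrative sketch (not part of the driver): a device driver that wants
 * to use IOMMUv2 features would typically query the capabilities first. The
 * function name below is made up; the flags and fields come from
 * struct amd_iommu_device_info as filled in above.
 */
#if 0
static bool example_device_supports_v2(struct pci_dev *pdev)
{
	struct amd_iommu_device_info info;

	if (amd_iommu_device_info(pdev, &info))
		return false;

	/* ATS, PRI and PASID support are all needed for demand paging */
	if (!(info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) ||
	    !(info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) ||
	    !(info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP))
		return false;

	return info.max_pasids > 0;
}
#endif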