amd_iommu.c

  1. /*
  2. * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
  3. * Author: Joerg Roedel <joerg.roedel@amd.com>
  4. * Leo Duran <leo.duran@amd.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published
  8. * by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/pci.h>
  20. #include <linux/gfp.h>
  21. #include <linux/bitops.h>
  22. #include <linux/debugfs.h>
  23. #include <linux/scatterlist.h>
  24. #include <linux/dma-mapping.h>
  25. #include <linux/iommu-helper.h>
  26. #include <linux/iommu.h>
  27. #include <asm/proto.h>
  28. #include <asm/iommu.h>
  29. #include <asm/gart.h>
  30. #include <asm/amd_iommu_types.h>
  31. #include <asm/amd_iommu.h>
  32. #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
  33. #define EXIT_LOOP_COUNT 10000000
  34. static DEFINE_RWLOCK(amd_iommu_devtable_lock);
  35. /* A list of preallocated protection domains */
  36. static LIST_HEAD(iommu_pd_list);
  37. static DEFINE_SPINLOCK(iommu_pd_list_lock);
  38. #ifdef CONFIG_IOMMU_API
  39. static struct iommu_ops amd_iommu_ops;
  40. #endif
  41. /*
  42. * general struct to manage commands sent to an IOMMU
  43. */
  44. struct iommu_cmd {
  45. u32 data[4];
  46. };
  47. static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
  48. struct unity_map_entry *e);
  49. static struct dma_ops_domain *find_protection_domain(u16 devid);
  50. static u64 *alloc_pte(struct protection_domain *dom,
  51. unsigned long address,
  52. u64 **pte_page, gfp_t gfp);
  53. static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
  54. unsigned long start_page,
  55. unsigned int pages);
  56. #ifndef BUS_NOTIFY_UNBOUND_DRIVER
  57. #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
  58. #endif
  59. #ifdef CONFIG_AMD_IOMMU_STATS
  60. /*
  61. * Initialization code for statistics collection
  62. */
  63. DECLARE_STATS_COUNTER(compl_wait);
  64. DECLARE_STATS_COUNTER(cnt_map_single);
  65. DECLARE_STATS_COUNTER(cnt_unmap_single);
  66. DECLARE_STATS_COUNTER(cnt_map_sg);
  67. DECLARE_STATS_COUNTER(cnt_unmap_sg);
  68. DECLARE_STATS_COUNTER(cnt_alloc_coherent);
  69. DECLARE_STATS_COUNTER(cnt_free_coherent);
  70. DECLARE_STATS_COUNTER(cross_page);
  71. DECLARE_STATS_COUNTER(domain_flush_single);
  72. DECLARE_STATS_COUNTER(domain_flush_all);
  73. DECLARE_STATS_COUNTER(alloced_io_mem);
  74. DECLARE_STATS_COUNTER(total_map_requests);
  75. static struct dentry *stats_dir;
  76. static struct dentry *de_isolate;
  77. static struct dentry *de_fflush;
  78. static void amd_iommu_stats_add(struct __iommu_counter *cnt)
  79. {
  80. if (stats_dir == NULL)
  81. return;
  82. cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
  83. &cnt->value);
  84. }
  85. static void amd_iommu_stats_init(void)
  86. {
  87. stats_dir = debugfs_create_dir("amd-iommu", NULL);
  88. if (stats_dir == NULL)
  89. return;
  90. de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
  91. (u32 *)&amd_iommu_isolate);
  92. de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
  93. (u32 *)&amd_iommu_unmap_flush);
  94. amd_iommu_stats_add(&compl_wait);
  95. amd_iommu_stats_add(&cnt_map_single);
  96. amd_iommu_stats_add(&cnt_unmap_single);
  97. amd_iommu_stats_add(&cnt_map_sg);
  98. amd_iommu_stats_add(&cnt_unmap_sg);
  99. amd_iommu_stats_add(&cnt_alloc_coherent);
  100. amd_iommu_stats_add(&cnt_free_coherent);
  101. amd_iommu_stats_add(&cross_page);
  102. amd_iommu_stats_add(&domain_flush_single);
  103. amd_iommu_stats_add(&domain_flush_all);
  104. amd_iommu_stats_add(&alloced_io_mem);
  105. amd_iommu_stats_add(&total_map_requests);
  106. }
  107. #endif
  108. /* returns !0 if the IOMMU is caching non-present entries in its TLB */
  109. static int iommu_has_npcache(struct amd_iommu *iommu)
  110. {
  111. return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
  112. }
  113. /****************************************************************************
  114. *
  115. * Interrupt handling functions
  116. *
  117. ****************************************************************************/
  118. static void iommu_print_event(void *__evt)
  119. {
  120. u32 *event = __evt;
  121. int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
  122. int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
  123. int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
  124. int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
  125. u64 address = (u64)(((u64)event[3]) << 32) | event[2];
  126. printk(KERN_ERR "AMD IOMMU: Event logged [");
  127. switch (type) {
  128. case EVENT_TYPE_ILL_DEV:
  129. printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
  130. "address=0x%016llx flags=0x%04x]\n",
  131. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  132. address, flags);
  133. break;
  134. case EVENT_TYPE_IO_FAULT:
  135. printk("IO_PAGE_FAULT device=%02x:%02x.%x "
  136. "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
  137. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  138. domid, address, flags);
  139. break;
  140. case EVENT_TYPE_DEV_TAB_ERR:
  141. printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
  142. "address=0x%016llx flags=0x%04x]\n",
  143. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  144. address, flags);
  145. break;
  146. case EVENT_TYPE_PAGE_TAB_ERR:
  147. printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
  148. "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
  149. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  150. domid, address, flags);
  151. break;
  152. case EVENT_TYPE_ILL_CMD:
  153. printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
  154. break;
  155. case EVENT_TYPE_CMD_HARD_ERR:
  156. printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
  157. "flags=0x%04x]\n", address, flags);
  158. break;
  159. case EVENT_TYPE_IOTLB_INV_TO:
  160. printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
  161. "address=0x%016llx]\n",
  162. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  163. address);
  164. break;
  165. case EVENT_TYPE_INV_DEV_REQ:
  166. printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
  167. "address=0x%016llx flags=0x%04x]\n",
  168. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  169. address, flags);
  170. break;
  171. default:
  172. printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
  173. }
  174. }
  175. static void iommu_poll_events(struct amd_iommu *iommu)
  176. {
  177. u32 head, tail;
  178. unsigned long flags;
  179. spin_lock_irqsave(&iommu->lock, flags);
  180. head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
  181. tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
  182. while (head != tail) {
  183. iommu_print_event(iommu->evt_buf + head);
  184. head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
  185. }
  186. writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
  187. spin_unlock_irqrestore(&iommu->lock, flags);
  188. }
  189. irqreturn_t amd_iommu_int_handler(int irq, void *data)
  190. {
  191. struct amd_iommu *iommu;
  192. for_each_iommu(iommu)
  193. iommu_poll_events(iommu);
  194. return IRQ_HANDLED;
  195. }
  196. /****************************************************************************
  197. *
  198. * IOMMU command queuing functions
  199. *
  200. ****************************************************************************/
  201. /*
  202. * Writes the command to the IOMMU's command buffer and informs the
  203. * hardware about the new command. Must be called with iommu->lock held.
  204. */
  205. static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
  206. {
  207. u32 tail, head;
  208. u8 *target;
  209. tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
  210. target = iommu->cmd_buf + tail;
  211. memcpy_toio(target, cmd, sizeof(*cmd));
  212. tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
  213. head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
  214. if (tail == head)
  215. return -ENOMEM;
  216. writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
  217. return 0;
  218. }
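/*
 * Illustrative sketch (not from this file): the wrap-around arithmetic
 * used by __iommu_queue_command() above, modelled in plain C with a
 * hypothetical fixed-size buffer.  The driver copies the entry at the
 * tail, advances the tail modulo the buffer size, and treats the queue
 * as full when the new tail would collide with the hardware's head.
 */
#include <string.h>

#define CMD_BUF_SIZE   4096
#define CMD_ENTRY_SIZE 16

struct cmd_ring {
	unsigned char buf[CMD_BUF_SIZE];
	unsigned int head;            /* consumer position (hardware) */
	unsigned int tail;            /* producer position (driver)   */
};

/* Returns 0 on success, -1 if the ring is full. */
static int cmd_ring_push(struct cmd_ring *r, const void *entry)
{
	unsigned int next = (r->tail + CMD_ENTRY_SIZE) % CMD_BUF_SIZE;

	if (next == r->head)
		return -1;            /* same condition as tail == head above */

	memcpy(r->buf + r->tail, entry, CMD_ENTRY_SIZE);
	r->tail = next;               /* only now is the command visible */
	return 0;
}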
  219. /*
  220. * General queuing function for commands. Takes iommu->lock and calls
  221. * __iommu_queue_command().
  222. */
  223. static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
  224. {
  225. unsigned long flags;
  226. int ret;
  227. spin_lock_irqsave(&iommu->lock, flags);
  228. ret = __iommu_queue_command(iommu, cmd);
  229. if (!ret)
  230. iommu->need_sync = true;
  231. spin_unlock_irqrestore(&iommu->lock, flags);
  232. return ret;
  233. }
  234. /*
  235. * This function waits until an IOMMU has completed a completion
  236. * wait command
  237. */
  238. static void __iommu_wait_for_completion(struct amd_iommu *iommu)
  239. {
  240. int ready = 0;
  241. unsigned status = 0;
  242. unsigned long i = 0;
  243. INC_STATS_COUNTER(compl_wait);
  244. while (!ready && (i < EXIT_LOOP_COUNT)) {
  245. ++i;
  246. /* wait for the bit to become one */
  247. status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
  248. ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
  249. }
  250. /* set bit back to zero */
  251. status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
  252. writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
  253. if (unlikely(i == EXIT_LOOP_COUNT))
  254. panic("AMD IOMMU: Completion wait loop failed\n");
  255. }
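/*
 * Minimal sketch (assuming a memory-mapped status register modelled as
 * a volatile pointer) of the bounded polling done by
 * __iommu_wait_for_completion(): spin until the completion-wait bit is
 * set, clear it by writing the register back, and give up after a
 * fixed number of iterations.  The bit position is a placeholder.
 */
#define COMPL_WAIT_BIT 0x04UL         /* placeholder bit position */
#define POLL_LIMIT     10000000UL     /* mirrors EXIT_LOOP_COUNT  */

static int poll_completion(volatile unsigned long *status_reg)
{
	unsigned long i, status = 0;

	for (i = 0; i < POLL_LIMIT; ++i) {
		status = *status_reg;
		if (status & COMPL_WAIT_BIT)
			break;
	}
	if (!(status & COMPL_WAIT_BIT))
		return -1;                      /* the driver panics here */

	*status_reg = status & ~COMPL_WAIT_BIT; /* clear the bit          */
	return 0;
}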
  256. /*
  257. * This function queues a completion wait command into the command
  258. * buffer of an IOMMU
  259. */
  260. static int __iommu_completion_wait(struct amd_iommu *iommu)
  261. {
  262. struct iommu_cmd cmd;
  263. memset(&cmd, 0, sizeof(cmd));
  264. cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
  265. CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
  266. return __iommu_queue_command(iommu, &cmd);
  267. }
  268. /*
  269. * This function is called whenever we need to ensure that the IOMMU has
  270. * completed execution of all commands we sent. It sends a
  271. * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
  272. * us about that by writing a value to a physical address we pass with
  273. * the command.
  274. */
  275. static int iommu_completion_wait(struct amd_iommu *iommu)
  276. {
  277. int ret = 0;
  278. unsigned long flags;
  279. spin_lock_irqsave(&iommu->lock, flags);
  280. if (!iommu->need_sync)
  281. goto out;
  282. ret = __iommu_completion_wait(iommu);
  283. iommu->need_sync = false;
  284. if (ret)
  285. goto out;
  286. __iommu_wait_for_completion(iommu);
  287. out:
  288. spin_unlock_irqrestore(&iommu->lock, flags);
  289. return ret;
  290. }
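/*
 * Sketch of the need_sync optimisation used by iommu_completion_wait():
 * queueing any command sets a flag, and a COMPLETION_WAIT is only
 * issued (and the flag cleared) if something was actually queued since
 * the last sync.  The counters stand in for real hardware interaction.
 */
#include <stdbool.h>

struct sync_state {
	bool need_sync;
	unsigned long queued;   /* commands handed to the hardware  */
	unsigned long synced;   /* commands known to have completed */
};

static void queue_one(struct sync_state *s)
{
	s->queued++;            /* stands in for __iommu_queue_command() */
	s->need_sync = true;
}

static void completion_wait_sketch(struct sync_state *s)
{
	if (!s->need_sync)
		return;         /* nothing new since the last sync */

	s->need_sync = false;
	s->synced = s->queued;  /* stands in for queueing COMPLETION_WAIT
	                         * and spinning until it has completed */
}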
  291. /*
  292. * Command send function for invalidating a device table entry
  293. */
  294. static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
  295. {
  296. struct iommu_cmd cmd;
  297. int ret;
  298. BUG_ON(iommu == NULL);
  299. memset(&cmd, 0, sizeof(cmd));
  300. CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
  301. cmd.data[0] = devid;
  302. ret = iommu_queue_command(iommu, &cmd);
  303. return ret;
  304. }
  305. static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
  306. u16 domid, int pde, int s)
  307. {
  308. memset(cmd, 0, sizeof(*cmd));
  309. address &= PAGE_MASK;
  310. CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
  311. cmd->data[1] |= domid;
  312. cmd->data[2] = lower_32_bits(address);
  313. cmd->data[3] = upper_32_bits(address);
  314. if (s) /* size bit - we flush more than one 4kb page */
  315. cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
  316. if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
  317. cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
  318. }
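/*
 * Standalone sketch of how __iommu_build_inv_iommu_pages() packs an
 * INVALIDATE_IOMMU_PAGES command into four 32-bit words.  The command
 * type, size-bit and PDE-bit values below are placeholders, not the
 * real hardware encoding.
 */
#include <stdint.h>
#include <string.h>

struct inv_cmd { uint32_t data[4]; };

#define CMD_TYPE_INV_PAGES 0x3u        /* placeholder command type       */
#define SIZE_BIT           (1u << 0)   /* placeholder "flush range" bit  */
#define PDE_BIT            (1u << 1)   /* placeholder "include PDEs" bit */

static void build_inv_pages(struct inv_cmd *c, uint64_t addr, uint16_t domid,
			    int flush_pde, int flush_range)
{
	memset(c, 0, sizeof(*c));
	addr &= ~0xfffULL;                        /* page-align the address   */
	c->data[1] |= CMD_TYPE_INV_PAGES << 28;   /* what CMD_SET_TYPE() does */
	c->data[1] |= domid;
	c->data[2]  = (uint32_t)addr;             /* lower_32_bits(address)   */
	c->data[3]  = (uint32_t)(addr >> 32);     /* upper_32_bits(address)   */
	if (flush_range)
		c->data[2] |= SIZE_BIT;
	if (flush_pde)
		c->data[2] |= PDE_BIT;
}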
  319. /*
  320. * Generic command send function for invalidating TLB entries
  321. */
  322. static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
  323. u64 address, u16 domid, int pde, int s)
  324. {
  325. struct iommu_cmd cmd;
  326. int ret;
  327. __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s);
  328. ret = iommu_queue_command(iommu, &cmd);
  329. return ret;
  330. }
  331. /*
  332. * TLB invalidation function which is called from the mapping functions.
  333. * It invalidates a single PTE if the range to flush is within a single
  334. * page. Otherwise it flushes the whole TLB of the IOMMU.
  335. */
  336. static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
  337. u64 address, size_t size)
  338. {
  339. int s = 0;
  340. unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
  341. address &= PAGE_MASK;
  342. if (pages > 1) {
  343. /*
  344. * If we have to flush more than one page, flush all
  345. * TLB entries for this domain
  346. */
  347. address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
  348. s = 1;
  349. }
  350. iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);
  351. return 0;
  352. }
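/*
 * Sketch of the decision iommu_flush_pages() makes: count how many 4kb
 * pages the range [address, address + size) touches (the calculation
 * iommu_num_pages() performs) and fall back to flushing the whole
 * domain when more than one page is affected.
 */
#include <stdint.h>

#define PAGE_SHIFT_SKETCH 12

static unsigned long num_pages_sketch(uint64_t addr, uint64_t size)
{
	uint64_t first = addr >> PAGE_SHIFT_SKETCH;
	uint64_t last  = (addr + size - 1) >> PAGE_SHIFT_SKETCH;

	return (unsigned long)(last - first + 1);
}

/*
 * Example: num_pages_sketch(0x1ff8, 0x10) == 2, so even a 16-byte
 * transfer that straddles a page boundary makes the driver flush all
 * TLB entries of the domain instead of a single page.
 */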
  353. /* Flush the whole IO/TLB for a given protection domain */
  354. static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
  355. {
  356. u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
  357. INC_STATS_COUNTER(domain_flush_single);
  358. iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
  359. }
  360. /*
  361. * This function is used to flush the IO/TLB for a given protection domain
  362. * on every IOMMU in the system
  363. */
  364. static void iommu_flush_domain(u16 domid)
  365. {
  366. unsigned long flags;
  367. struct amd_iommu *iommu;
  368. struct iommu_cmd cmd;
  369. INC_STATS_COUNTER(domain_flush_all);
  370. __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
  371. domid, 1, 1);
  372. for_each_iommu(iommu) {
  373. spin_lock_irqsave(&iommu->lock, flags);
  374. __iommu_queue_command(iommu, &cmd);
  375. __iommu_completion_wait(iommu);
  376. __iommu_wait_for_completion(iommu);
  377. spin_unlock_irqrestore(&iommu->lock, flags);
  378. }
  379. }
  380. void amd_iommu_flush_all_domains(void)
  381. {
  382. int i;
  383. for (i = 1; i < MAX_DOMAIN_ID; ++i) {
  384. if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
  385. continue;
  386. iommu_flush_domain(i);
  387. }
  388. }
  389. void amd_iommu_flush_all_devices(void)
  390. {
  391. struct amd_iommu *iommu;
  392. int i;
  393. for (i = 0; i <= amd_iommu_last_bdf; ++i) {
  394. if (amd_iommu_pd_table[i] == NULL)
  395. continue;
  396. iommu = amd_iommu_rlookup_table[i];
  397. if (!iommu)
  398. continue;
  399. iommu_queue_inv_dev_entry(iommu, i);
  400. iommu_completion_wait(iommu);
  401. }
  402. }
  403. /****************************************************************************
  404. *
  405. * The functions below are used to create the page table mappings for
  406. * unity mapped regions.
  407. *
  408. ****************************************************************************/
  409. /*
  410. * Generic mapping function. It maps a physical address into a DMA
  411. * address space. It allocates the page table pages if necessary.
  412. * In the future it can be extended to a generic mapping function
  413. * supporting all features of AMD IOMMU page tables like level skipping
  414. * and full 64 bit address spaces.
  415. */
  416. static int iommu_map_page(struct protection_domain *dom,
  417. unsigned long bus_addr,
  418. unsigned long phys_addr,
  419. int prot)
  420. {
  421. u64 __pte, *pte;
  422. bus_addr = PAGE_ALIGN(bus_addr);
  423. phys_addr = PAGE_ALIGN(phys_addr);
  424. /* only support 512GB address spaces for now */
  425. if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
  426. return -EINVAL;
  427. pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL);
  428. if (IOMMU_PTE_PRESENT(*pte))
  429. return -EBUSY;
  430. __pte = phys_addr | IOMMU_PTE_P;
  431. if (prot & IOMMU_PROT_IR)
  432. __pte |= IOMMU_PTE_IR;
  433. if (prot & IOMMU_PROT_IW)
  434. __pte |= IOMMU_PTE_IW;
  435. *pte = __pte;
  436. return 0;
  437. }
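/*
 * Sketch of the PTE assembly in iommu_map_page(): the page-aligned
 * physical address is combined with a present bit and with read/write
 * permission bits derived from the requested protection.  The bit
 * positions are placeholders for IOMMU_PTE_P/IR/IW, not the hardware
 * layout.
 */
#include <stdint.h>

#define PTE_PRESENT_SKETCH (1ULL << 0)
#define PTE_READ_SKETCH    (1ULL << 61)
#define PTE_WRITE_SKETCH   (1ULL << 62)
#define PROT_READ_SKETCH   0x1
#define PROT_WRITE_SKETCH  0x2

static uint64_t make_pte(uint64_t phys_addr, int prot)
{
	uint64_t pte = (phys_addr & ~0xfffULL) | PTE_PRESENT_SKETCH;

	if (prot & PROT_READ_SKETCH)
		pte |= PTE_READ_SKETCH;
	if (prot & PROT_WRITE_SKETCH)
		pte |= PTE_WRITE_SKETCH;
	return pte;
}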
  438. static void iommu_unmap_page(struct protection_domain *dom,
  439. unsigned long bus_addr)
  440. {
  441. u64 *pte;
  442. pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];
  443. if (!IOMMU_PTE_PRESENT(*pte))
  444. return;
  445. pte = IOMMU_PTE_PAGE(*pte);
  446. pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
  447. if (!IOMMU_PTE_PRESENT(*pte))
  448. return;
  449. pte = IOMMU_PTE_PAGE(*pte);
  450. pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)]; /* leaf level, as in fetch_pte() */
  451. *pte = 0;
  452. }
  453. /*
  454. * This function checks if a specific unity mapping entry is needed for
  455. * this specific IOMMU.
  456. */
  457. static int iommu_for_unity_map(struct amd_iommu *iommu,
  458. struct unity_map_entry *entry)
  459. {
  460. u16 bdf, i;
  461. for (i = entry->devid_start; i <= entry->devid_end; ++i) {
  462. bdf = amd_iommu_alias_table[i];
  463. if (amd_iommu_rlookup_table[bdf] == iommu)
  464. return 1;
  465. }
  466. return 0;
  467. }
  468. /*
  469. * Init the unity mappings for a specific IOMMU in the system
  470. *
  471. * Basically iterates over all unity mapping entries and applies them to
  472. * the default domain DMA of that IOMMU if necessary.
  473. */
  474. static int iommu_init_unity_mappings(struct amd_iommu *iommu)
  475. {
  476. struct unity_map_entry *entry;
  477. int ret;
  478. list_for_each_entry(entry, &amd_iommu_unity_map, list) {
  479. if (!iommu_for_unity_map(iommu, entry))
  480. continue;
  481. ret = dma_ops_unity_map(iommu->default_dom, entry);
  482. if (ret)
  483. return ret;
  484. }
  485. return 0;
  486. }
  487. /*
  488. * This function actually applies the mapping to the page table of the
  489. * dma_ops domain.
  490. */
  491. static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
  492. struct unity_map_entry *e)
  493. {
  494. u64 addr;
  495. int ret;
  496. for (addr = e->address_start; addr < e->address_end;
  497. addr += PAGE_SIZE) {
  498. ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot);
  499. if (ret)
  500. return ret;
  501. /*
  502. * if unity mapping is in aperture range mark the page
  503. * as allocated in the aperture
  504. */
  505. if (addr < dma_dom->aperture_size)
  506. __set_bit(addr >> PAGE_SHIFT,
  507. dma_dom->aperture[0]->bitmap);
  508. }
  509. return 0;
  510. }
  511. /*
  512. * Inits the unity mappings required for a specific device
  513. */
  514. static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  515. u16 devid)
  516. {
  517. struct unity_map_entry *e;
  518. int ret;
  519. list_for_each_entry(e, &amd_iommu_unity_map, list) {
  520. if (!(devid >= e->devid_start && devid <= e->devid_end))
  521. continue;
  522. ret = dma_ops_unity_map(dma_dom, e);
  523. if (ret)
  524. return ret;
  525. }
  526. return 0;
  527. }
  528. /****************************************************************************
  529. *
  530. * The next functions belong to the address allocator for the dma_ops
  531. * interface functions. They work like the allocators in the other IOMMU
  532. * drivers. It's basically a bitmap which marks the allocated pages in
  533. * the aperture. Maybe it could be enhanced in the future to a more
  534. * efficient allocator.
  535. *
  536. ****************************************************************************/
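/*
 * Standalone sketch of the bitmap idea described above: bit i stands
 * for page i of the aperture, allocation looks for a run of clear bits
 * and sets them, freeing clears them again.  The real allocator
 * (dma_ops_area_alloc()/iommu_area_alloc()) additionally honours
 * alignment, segment boundaries and the device's DMA mask.
 */
#include <stdbool.h>
#include <stddef.h>

#define APERTURE_PAGES_SKETCH 1024    /* hypothetical aperture size in pages */

static unsigned char ap_bitmap[APERTURE_PAGES_SKETCH / 8];

static bool ap_test(size_t i)  { return ap_bitmap[i / 8] >> (i % 8) & 1; }
static void ap_set(size_t i)   { ap_bitmap[i / 8] |= (unsigned char)(1u << (i % 8)); }
static void ap_clear(size_t i) { ap_bitmap[i / 8] &= (unsigned char)~(1u << (i % 8)); }

/* First-fit search for 'pages' consecutive free pages; returns -1 if none. */
static long ap_alloc(size_t pages)
{
	size_t start, n;

	for (start = 0; start + pages <= APERTURE_PAGES_SKETCH; ++start) {
		for (n = 0; n < pages && !ap_test(start + n); ++n)
			;
		if (n == pages) {
			for (n = 0; n < pages; ++n)
				ap_set(start + n);
			return (long)start;
		}
	}
	return -1;
}

static void ap_free(size_t start, size_t pages)
{
	while (pages--)
		ap_clear(start + pages);
}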
  537. /*
  538. * The address allocator core functions.
  539. *
  540. * called with domain->lock held
  541. */
  542. /*
  543. * This function checks if there is a PTE for a given dma address. If
  544. * there is one, it returns the pointer to it.
  545. */
  546. static u64* fetch_pte(struct protection_domain *domain,
  547. unsigned long address)
  548. {
  549. u64 *pte;
  550. pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)];
  551. if (!IOMMU_PTE_PRESENT(*pte))
  552. return NULL;
  553. pte = IOMMU_PTE_PAGE(*pte);
  554. pte = &pte[IOMMU_PTE_L1_INDEX(address)];
  555. if (!IOMMU_PTE_PRESENT(*pte))
  556. return NULL;
  557. pte = IOMMU_PTE_PAGE(*pte);
  558. pte = &pte[IOMMU_PTE_L0_INDEX(address)];
  559. return pte;
  560. }
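/*
 * Sketch of the index arithmetic behind fetch_pte(): with 4kb pages and
 * 512-entry tables, a three-level DMA address splits into three 9-bit
 * table indices plus a 12-bit page offset, mirroring what
 * IOMMU_PTE_L2_INDEX/L1_INDEX/L0_INDEX extract.
 */
#define L2_INDEX_SKETCH(a) (((a) >> 30) & 0x1ffULL)  /* top level       */
#define L1_INDEX_SKETCH(a) (((a) >> 21) & 0x1ffULL)  /* middle level    */
#define L0_INDEX_SKETCH(a) (((a) >> 12) & 0x1ffULL)  /* leaf page table */
#define PAGE_OFF_SKETCH(a) ((a) & 0xfffULL)

/*
 * Example: for the address 0x80604056ULL the indices are
 * L2 == 2, L1 == 3, L0 == 4 and the page offset is 0x56.
 */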
  561. /*
  562. * This function is used to add a new aperture range to an existing
  563. * aperture in case of dma_ops domain allocation or address allocation
  564. * failure.
  565. */
  566. static int alloc_new_range(struct amd_iommu *iommu,
  567. struct dma_ops_domain *dma_dom,
  568. bool populate, gfp_t gfp)
  569. {
  570. int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
  571. int i;
  572. #ifdef CONFIG_IOMMU_STRESS
  573. populate = false;
  574. #endif
  575. if (index >= APERTURE_MAX_RANGES)
  576. return -ENOMEM;
  577. dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
  578. if (!dma_dom->aperture[index])
  579. return -ENOMEM;
  580. dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
  581. if (!dma_dom->aperture[index]->bitmap)
  582. goto out_free;
  583. dma_dom->aperture[index]->offset = dma_dom->aperture_size;
  584. if (populate) {
  585. unsigned long address = dma_dom->aperture_size;
  586. int i, num_ptes = APERTURE_RANGE_PAGES / 512;
  587. u64 *pte, *pte_page;
  588. for (i = 0; i < num_ptes; ++i) {
  589. pte = alloc_pte(&dma_dom->domain, address,
  590. &pte_page, gfp);
  591. if (!pte)
  592. goto out_free;
  593. dma_dom->aperture[index]->pte_pages[i] = pte_page;
  594. address += APERTURE_RANGE_SIZE / 64;
  595. }
  596. }
  597. dma_dom->aperture_size += APERTURE_RANGE_SIZE;
  598. /* Initialize the exclusion range if necessary */
  599. if (iommu->exclusion_start &&
  600. iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
  601. iommu->exclusion_start < dma_dom->aperture_size) {
  602. unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
  603. int pages = iommu_num_pages(iommu->exclusion_start,
  604. iommu->exclusion_length,
  605. PAGE_SIZE);
  606. dma_ops_reserve_addresses(dma_dom, startpage, pages);
  607. }
  608. /*
  609. * Check for areas already mapped as present in the new aperture
  610. * range and mark those pages as reserved in the allocator. Such
  611. * mappings may already exist as a result of requested unity
  612. * mappings for devices.
  613. */
  614. for (i = dma_dom->aperture[index]->offset;
  615. i < dma_dom->aperture_size;
  616. i += PAGE_SIZE) {
  617. u64 *pte = fetch_pte(&dma_dom->domain, i);
  618. if (!pte || !IOMMU_PTE_PRESENT(*pte))
  619. continue;
  620. dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1); /* i is an address, not a page number */
  621. }
  622. return 0;
  623. out_free:
  624. free_page((unsigned long)dma_dom->aperture[index]->bitmap);
  625. kfree(dma_dom->aperture[index]);
  626. dma_dom->aperture[index] = NULL;
  627. return -ENOMEM;
  628. }
  629. static unsigned long dma_ops_area_alloc(struct device *dev,
  630. struct dma_ops_domain *dom,
  631. unsigned int pages,
  632. unsigned long align_mask,
  633. u64 dma_mask,
  634. unsigned long start)
  635. {
  636. unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
  637. int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
  638. int i = start >> APERTURE_RANGE_SHIFT;
  639. unsigned long boundary_size;
  640. unsigned long address = -1;
  641. unsigned long limit;
  642. next_bit >>= PAGE_SHIFT;
  643. boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
  644. PAGE_SIZE) >> PAGE_SHIFT;
  645. for (;i < max_index; ++i) {
  646. unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
  647. if (dom->aperture[i]->offset >= dma_mask)
  648. break;
  649. limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
  650. dma_mask >> PAGE_SHIFT);
  651. address = iommu_area_alloc(dom->aperture[i]->bitmap,
  652. limit, next_bit, pages, 0,
  653. boundary_size, align_mask);
  654. if (address != -1) {
  655. address = dom->aperture[i]->offset +
  656. (address << PAGE_SHIFT);
  657. dom->next_address = address + (pages << PAGE_SHIFT);
  658. break;
  659. }
  660. next_bit = 0;
  661. }
  662. return address;
  663. }
  664. static unsigned long dma_ops_alloc_addresses(struct device *dev,
  665. struct dma_ops_domain *dom,
  666. unsigned int pages,
  667. unsigned long align_mask,
  668. u64 dma_mask)
  669. {
  670. unsigned long address;
  671. #ifdef CONFIG_IOMMU_STRESS
  672. dom->next_address = 0;
  673. dom->need_flush = true;
  674. #endif
  675. address = dma_ops_area_alloc(dev, dom, pages, align_mask,
  676. dma_mask, dom->next_address);
  677. if (address == -1) {
  678. dom->next_address = 0;
  679. address = dma_ops_area_alloc(dev, dom, pages, align_mask,
  680. dma_mask, 0);
  681. dom->need_flush = true;
  682. }
  683. if (unlikely(address == -1))
  684. address = bad_dma_address;
  685. WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
  686. return address;
  687. }
  688. /*
  689. * The address free function.
  690. *
  691. * called with domain->lock held
  692. */
  693. static void dma_ops_free_addresses(struct dma_ops_domain *dom,
  694. unsigned long address,
  695. unsigned int pages)
  696. {
  697. unsigned i = address >> APERTURE_RANGE_SHIFT;
  698. struct aperture_range *range = dom->aperture[i];
  699. BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
  700. #ifdef CONFIG_IOMMU_STRESS
  701. if (i < 4)
  702. return;
  703. #endif
  704. if (address >= dom->next_address)
  705. dom->need_flush = true;
  706. address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
  707. iommu_area_free(range->bitmap, address, pages);
  708. }
  709. /****************************************************************************
  710. *
  711. * The next functions belong to the domain allocation. A domain is
  712. * allocated for every IOMMU as the default domain. If device isolation
  713. * is enabled, every device gets its own domain. The most important thing
  714. * about domains is the page table mapping the DMA address space they
  715. * contain.
  716. *
  717. ****************************************************************************/
  718. static u16 domain_id_alloc(void)
  719. {
  720. unsigned long flags;
  721. int id;
  722. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  723. id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
  724. BUG_ON(id == 0);
  725. if (id > 0 && id < MAX_DOMAIN_ID)
  726. __set_bit(id, amd_iommu_pd_alloc_bitmap);
  727. else
  728. id = 0;
  729. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  730. return id;
  731. }
  732. static void domain_id_free(int id)
  733. {
  734. unsigned long flags;
  735. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  736. if (id > 0 && id < MAX_DOMAIN_ID)
  737. __clear_bit(id, amd_iommu_pd_alloc_bitmap);
  738. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  739. }
  740. /*
  741. * Used to reserve address ranges in the aperture (e.g. for exclusion
  742. * ranges).
  743. */
  744. static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
  745. unsigned long start_page,
  746. unsigned int pages)
  747. {
  748. unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
  749. if (start_page + pages > last_page)
  750. pages = last_page - start_page;
  751. for (i = start_page; i < start_page + pages; ++i) {
  752. int index = i / APERTURE_RANGE_PAGES;
  753. int page = i % APERTURE_RANGE_PAGES;
  754. __set_bit(page, dom->aperture[index]->bitmap);
  755. }
  756. }
  757. static void free_pagetable(struct protection_domain *domain)
  758. {
  759. int i, j;
  760. u64 *p1, *p2, *p3;
  761. p1 = domain->pt_root;
  762. if (!p1)
  763. return;
  764. for (i = 0; i < 512; ++i) {
  765. if (!IOMMU_PTE_PRESENT(p1[i]))
  766. continue;
  767. p2 = IOMMU_PTE_PAGE(p1[i]);
  768. for (j = 0; j < 512; ++j) {
  769. if (!IOMMU_PTE_PRESENT(p2[j]))
  770. continue;
  771. p3 = IOMMU_PTE_PAGE(p2[j]);
  772. free_page((unsigned long)p3);
  773. }
  774. free_page((unsigned long)p2);
  775. }
  776. free_page((unsigned long)p1);
  777. domain->pt_root = NULL;
  778. }
  779. /*
  780. * Free a domain, only used if something went wrong in the
  781. * allocation path and we need to free an already allocated page table
  782. */
  783. static void dma_ops_domain_free(struct dma_ops_domain *dom)
  784. {
  785. int i;
  786. if (!dom)
  787. return;
  788. free_pagetable(&dom->domain);
  789. for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
  790. if (!dom->aperture[i])
  791. continue;
  792. free_page((unsigned long)dom->aperture[i]->bitmap);
  793. kfree(dom->aperture[i]);
  794. }
  795. kfree(dom);
  796. }
  797. /*
  798. * Allocates a new protection domain usable for the dma_ops functions.
  799. * It also initializes the page table and the address allocator data
  800. * structures required for the dma_ops interface
  801. */
  802. static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
  803. {
  804. struct dma_ops_domain *dma_dom;
  805. dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
  806. if (!dma_dom)
  807. return NULL;
  808. spin_lock_init(&dma_dom->domain.lock);
  809. dma_dom->domain.id = domain_id_alloc();
  810. if (dma_dom->domain.id == 0)
  811. goto free_dma_dom;
  812. dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
  813. dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
  814. dma_dom->domain.flags = PD_DMA_OPS_MASK;
  815. dma_dom->domain.priv = dma_dom;
  816. if (!dma_dom->domain.pt_root)
  817. goto free_dma_dom;
  818. dma_dom->need_flush = false;
  819. dma_dom->target_dev = 0xffff;
  820. if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
  821. goto free_dma_dom;
  822. /*
  823. * mark the first page as allocated so we never return 0 as
  824. * a valid dma-address. So we can use 0 as error value
  825. */
  826. dma_dom->aperture[0]->bitmap[0] = 1;
  827. dma_dom->next_address = 0;
  828. return dma_dom;
  829. free_dma_dom:
  830. dma_ops_domain_free(dma_dom);
  831. return NULL;
  832. }
  833. /*
  834. * little helper function to check whether a given protection domain is a
  835. * dma_ops domain
  836. */
  837. static bool dma_ops_domain(struct protection_domain *domain)
  838. {
  839. return domain->flags & PD_DMA_OPS_MASK;
  840. }
  841. /*
  842. * Find out the protection domain structure for a given PCI device. This
  843. * will give us the pointer to the page table root for example.
  844. */
  845. static struct protection_domain *domain_for_device(u16 devid)
  846. {
  847. struct protection_domain *dom;
  848. unsigned long flags;
  849. read_lock_irqsave(&amd_iommu_devtable_lock, flags);
  850. dom = amd_iommu_pd_table[devid];
  851. read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  852. return dom;
  853. }
  854. /*
  855. * If a device is not yet associated with a domain, this function
  856. * assigns it to one and makes the assignment visible to the hardware
  857. */
  858. static void attach_device(struct amd_iommu *iommu,
  859. struct protection_domain *domain,
  860. u16 devid)
  861. {
  862. unsigned long flags;
  863. u64 pte_root = virt_to_phys(domain->pt_root);
  864. domain->dev_cnt += 1;
  865. pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
  866. << DEV_ENTRY_MODE_SHIFT;
  867. pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
  868. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  869. amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
  870. amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
  871. amd_iommu_dev_table[devid].data[2] = domain->id;
  872. amd_iommu_pd_table[devid] = domain;
  873. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  874. iommu_queue_inv_dev_entry(iommu, devid);
  875. }
  876. /*
  877. * Removes a device from a protection domain (unlocked)
  878. */
  879. static void __detach_device(struct protection_domain *domain, u16 devid)
  880. {
  881. /* lock domain */
  882. spin_lock(&domain->lock);
  883. /* remove domain from the lookup table */
  884. amd_iommu_pd_table[devid] = NULL;
  885. /* remove entry from the device table seen by the hardware */
  886. amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
  887. amd_iommu_dev_table[devid].data[1] = 0;
  888. amd_iommu_dev_table[devid].data[2] = 0;
  889. /* decrease reference counter */
  890. domain->dev_cnt -= 1;
  891. /* ready */
  892. spin_unlock(&domain->lock);
  893. }
  894. /*
  895. * Removes a device from a protection domain (with devtable_lock held)
  896. */
  897. static void detach_device(struct protection_domain *domain, u16 devid)
  898. {
  899. unsigned long flags;
  900. /* lock device table */
  901. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  902. __detach_device(domain, devid);
  903. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  904. }
  905. static int device_change_notifier(struct notifier_block *nb,
  906. unsigned long action, void *data)
  907. {
  908. struct device *dev = data;
  909. struct pci_dev *pdev = to_pci_dev(dev);
  910. u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
  911. struct protection_domain *domain;
  912. struct dma_ops_domain *dma_domain;
  913. struct amd_iommu *iommu;
  914. unsigned long flags;
  915. if (devid > amd_iommu_last_bdf)
  916. goto out;
  917. devid = amd_iommu_alias_table[devid];
  918. iommu = amd_iommu_rlookup_table[devid];
  919. if (iommu == NULL)
  920. goto out;
  921. domain = domain_for_device(devid);
  922. if (domain && !dma_ops_domain(domain))
  923. WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
  924. "to a non-dma-ops domain\n", dev_name(dev));
  925. switch (action) {
  926. case BUS_NOTIFY_UNBOUND_DRIVER:
  927. if (!domain)
  928. goto out;
  929. detach_device(domain, devid);
  930. break;
  931. case BUS_NOTIFY_ADD_DEVICE:
  932. /* allocate a protection domain if a device is added */
  933. dma_domain = find_protection_domain(devid);
  934. if (dma_domain)
  935. goto out;
  936. dma_domain = dma_ops_domain_alloc(iommu);
  937. if (!dma_domain)
  938. goto out;
  939. dma_domain->target_dev = devid;
  940. spin_lock_irqsave(&iommu_pd_list_lock, flags);
  941. list_add_tail(&dma_domain->list, &iommu_pd_list);
  942. spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
  943. break;
  944. default:
  945. goto out;
  946. }
  947. iommu_queue_inv_dev_entry(iommu, devid);
  948. iommu_completion_wait(iommu);
  949. out:
  950. return 0;
  951. }
  952. struct notifier_block device_nb = {
  953. .notifier_call = device_change_notifier,
  954. };
  955. /*****************************************************************************
  956. *
  957. * The next functions belong to the dma_ops mapping/unmapping code.
  958. *
  959. *****************************************************************************/
  960. /*
  961. * This function checks if the driver got a valid device from the caller to
  962. * avoid dereferencing invalid pointers.
  963. */
  964. static bool check_device(struct device *dev)
  965. {
  966. if (!dev || !dev->dma_mask)
  967. return false;
  968. return true;
  969. }
  970. /*
  971. * In this function the list of preallocated protection domains is traversed to
  972. * find the domain for a specific device
  973. */
  974. static struct dma_ops_domain *find_protection_domain(u16 devid)
  975. {
  976. struct dma_ops_domain *entry, *ret = NULL;
  977. unsigned long flags;
  978. if (list_empty(&iommu_pd_list))
  979. return NULL;
  980. spin_lock_irqsave(&iommu_pd_list_lock, flags);
  981. list_for_each_entry(entry, &iommu_pd_list, list) {
  982. if (entry->target_dev == devid) {
  983. ret = entry;
  984. break;
  985. }
  986. }
  987. spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
  988. return ret;
  989. }
  990. /*
  991. * In the dma_ops path we only have the struct device. This function
  992. * finds the corresponding IOMMU, the protection domain and the
  993. * requestor id for a given device.
  994. * If the device is not yet associated with a domain this is also done
  995. * in this function.
  996. */
  997. static int get_device_resources(struct device *dev,
  998. struct amd_iommu **iommu,
  999. struct protection_domain **domain,
  1000. u16 *bdf)
  1001. {
  1002. struct dma_ops_domain *dma_dom;
  1003. struct pci_dev *pcidev;
  1004. u16 _bdf;
  1005. *iommu = NULL;
  1006. *domain = NULL;
  1007. *bdf = 0xffff;
  1008. if (dev->bus != &pci_bus_type)
  1009. return 0;
  1010. pcidev = to_pci_dev(dev);
  1011. _bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
  1012. /* device not translated by any IOMMU in the system? */
  1013. if (_bdf > amd_iommu_last_bdf)
  1014. return 0;
  1015. *bdf = amd_iommu_alias_table[_bdf];
  1016. *iommu = amd_iommu_rlookup_table[*bdf];
  1017. if (*iommu == NULL)
  1018. return 0;
  1019. *domain = domain_for_device(*bdf);
  1020. if (*domain == NULL) {
  1021. dma_dom = find_protection_domain(*bdf);
  1022. if (!dma_dom)
  1023. dma_dom = (*iommu)->default_dom;
  1024. *domain = &dma_dom->domain;
  1025. attach_device(*iommu, *domain, *bdf);
  1026. DUMP_printk("Using protection domain %d for device %s\n",
  1027. (*domain)->id, dev_name(dev));
  1028. }
  1029. if (domain_for_device(_bdf) == NULL)
  1030. attach_device(*iommu, *domain, _bdf);
  1031. return 1;
  1032. }
  1033. /*
  1034. * If the pte_page is not yet allocated this function is called
  1035. */
  1036. static u64* alloc_pte(struct protection_domain *dom,
  1037. unsigned long address, u64 **pte_page, gfp_t gfp)
  1038. {
  1039. u64 *pte, *page;
  1040. pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)];
  1041. if (!IOMMU_PTE_PRESENT(*pte)) {
  1042. page = (u64 *)get_zeroed_page(gfp);
  1043. if (!page)
  1044. return NULL;
  1045. *pte = IOMMU_L2_PDE(virt_to_phys(page));
  1046. }
  1047. pte = IOMMU_PTE_PAGE(*pte);
  1048. pte = &pte[IOMMU_PTE_L1_INDEX(address)];
  1049. if (!IOMMU_PTE_PRESENT(*pte)) {
  1050. page = (u64 *)get_zeroed_page(gfp);
  1051. if (!page)
  1052. return NULL;
  1053. *pte = IOMMU_L1_PDE(virt_to_phys(page));
  1054. }
  1055. pte = IOMMU_PTE_PAGE(*pte);
  1056. if (pte_page)
  1057. *pte_page = pte;
  1058. pte = &pte[IOMMU_PTE_L0_INDEX(address)];
  1059. return pte;
  1060. }
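/*
 * Userspace sketch of the lazy table allocation alloc_pte() performs:
 * walk from the root towards the leaf and allocate a zeroed 512-entry
 * table whenever the next-level entry is not yet present.  calloc()
 * stands in for get_zeroed_page() and entries store pointers (tagged
 * with a present bit) instead of physical addresses.
 */
#include <stdint.h>
#include <stdlib.h>

struct pt_table { uint64_t e[512]; };

#define PT_PRESENT 1ULL

static struct pt_table *pt_child(uint64_t entry)
{
	return (struct pt_table *)(uintptr_t)(entry & ~PT_PRESENT);
}

static uint64_t *pt_walk_alloc(struct pt_table *root, uint64_t addr)
{
	unsigned idx[2] = {
		(unsigned)((addr >> 30) & 0x1ff),   /* level 2 index */
		(unsigned)((addr >> 21) & 0x1ff),   /* level 1 index */
	};
	struct pt_table *t = root;
	int level;

	for (level = 0; level < 2; ++level) {
		if (!(t->e[idx[level]] & PT_PRESENT)) {
			struct pt_table *n = calloc(1, sizeof(*n));

			if (!n)
				return NULL;
			t->e[idx[level]] = (uint64_t)(uintptr_t)n | PT_PRESENT;
		}
		t = pt_child(t->e[idx[level]]);
	}
	return &t->e[(addr >> 12) & 0x1ff];         /* slot for the PTE */
}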
  1061. /*
  1062. * This function fetches the PTE for a given address in the aperture
  1063. */
  1064. static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
  1065. unsigned long address)
  1066. {
  1067. struct aperture_range *aperture;
  1068. u64 *pte, *pte_page;
  1069. aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
  1070. if (!aperture)
  1071. return NULL;
  1072. pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
  1073. if (!pte) {
  1074. pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC);
  1075. aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
  1076. } else
  1077. pte += IOMMU_PTE_L0_INDEX(address);
  1078. return pte;
  1079. }
  1080. /*
  1081. * This is the generic map function. It maps one 4kb page at paddr to
  1082. * the given address in the DMA address space for the domain.
  1083. */
  1084. static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
  1085. struct dma_ops_domain *dom,
  1086. unsigned long address,
  1087. phys_addr_t paddr,
  1088. int direction)
  1089. {
  1090. u64 *pte, __pte;
  1091. WARN_ON(address > dom->aperture_size);
  1092. paddr &= PAGE_MASK;
  1093. pte = dma_ops_get_pte(dom, address);
  1094. if (!pte)
  1095. return bad_dma_address;
  1096. __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
  1097. if (direction == DMA_TO_DEVICE)
  1098. __pte |= IOMMU_PTE_IR;
  1099. else if (direction == DMA_FROM_DEVICE)
  1100. __pte |= IOMMU_PTE_IW;
  1101. else if (direction == DMA_BIDIRECTIONAL)
  1102. __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
  1103. WARN_ON(*pte);
  1104. *pte = __pte;
  1105. return (dma_addr_t)address;
  1106. }
  1107. /*
  1108. * The generic unmapping function for one page in the DMA address space.
  1109. */
  1110. static void dma_ops_domain_unmap(struct amd_iommu *iommu,
  1111. struct dma_ops_domain *dom,
  1112. unsigned long address)
  1113. {
  1114. struct aperture_range *aperture;
  1115. u64 *pte;
  1116. if (address >= dom->aperture_size)
  1117. return;
  1118. aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
  1119. if (!aperture)
  1120. return;
  1121. pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
  1122. if (!pte)
  1123. return;
  1124. pte += IOMMU_PTE_L0_INDEX(address);
  1125. WARN_ON(!*pte);
  1126. *pte = 0ULL;
  1127. }
  1128. /*
  1129. * This function contains common code for mapping of a physically
  1130. * contiguous memory region into DMA address space. It is used by all
  1131. * mapping functions provided with this IOMMU driver.
  1132. * Must be called with the domain lock held.
  1133. */
  1134. static dma_addr_t __map_single(struct device *dev,
  1135. struct amd_iommu *iommu,
  1136. struct dma_ops_domain *dma_dom,
  1137. phys_addr_t paddr,
  1138. size_t size,
  1139. int dir,
  1140. bool align,
  1141. u64 dma_mask)
  1142. {
  1143. dma_addr_t offset = paddr & ~PAGE_MASK;
  1144. dma_addr_t address, start, ret;
  1145. unsigned int pages;
  1146. unsigned long align_mask = 0;
  1147. int i;
  1148. pages = iommu_num_pages(paddr, size, PAGE_SIZE);
  1149. paddr &= PAGE_MASK;
  1150. INC_STATS_COUNTER(total_map_requests);
  1151. if (pages > 1)
  1152. INC_STATS_COUNTER(cross_page);
  1153. if (align)
  1154. align_mask = (1UL << get_order(size)) - 1;
  1155. retry:
  1156. address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
  1157. dma_mask);
  1158. if (unlikely(address == bad_dma_address)) {
  1159. /*
  1160. * setting next_address here will let the address
  1161. * allocator only scan the new allocated range in the
  1162. * first run. This is a small optimization.
  1163. */
  1164. dma_dom->next_address = dma_dom->aperture_size;
  1165. if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC))
  1166. goto out;
  1167. /*
  1168. * aperture was successfully enlarged by 128 MB, try
  1169. * allocation again
  1170. */
  1171. goto retry;
  1172. }
  1173. start = address;
  1174. for (i = 0; i < pages; ++i) {
  1175. ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
  1176. if (ret == bad_dma_address)
  1177. goto out_unmap;
  1178. paddr += PAGE_SIZE;
  1179. start += PAGE_SIZE;
  1180. }
  1181. address += offset;
  1182. ADD_STATS_COUNTER(alloced_io_mem, size);
  1183. if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
  1184. iommu_flush_tlb(iommu, dma_dom->domain.id);
  1185. dma_dom->need_flush = false;
  1186. } else if (unlikely(iommu_has_npcache(iommu)))
  1187. iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
  1188. out:
  1189. return address;
  1190. out_unmap:
  1191. for (--i; i >= 0; --i) {
  1192. start -= PAGE_SIZE;
  1193. dma_ops_domain_unmap(iommu, dma_dom, start);
  1194. }
  1195. dma_ops_free_addresses(dma_dom, address, pages);
  1196. return bad_dma_address;
  1197. }
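/*
 * Sketch of the address arithmetic in __map_single(): the sub-page
 * offset of the physical buffer is preserved, whole pages are mapped,
 * and the DMA address handed back is the page-aligned base returned by
 * the allocator plus that offset.  The numbers are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t paddr = 0x12345678ULL;  /* hypothetical buffer address */
	uint64_t size  = 0x3000;         /* 12kb transfer               */

	uint64_t offset = paddr & 0xfffULL;                  /* 0x678   */
	uint64_t first  = paddr >> 12;
	uint64_t last   = (paddr + size - 1) >> 12;
	unsigned pages  = (unsigned)(last - first + 1);      /* 4 pages */

	uint64_t iova_base = 0x100000;   /* assumed allocator result    */
	uint64_t dma_addr  = iova_base + offset;             /* result  */

	printf("pages=%u dma_addr=0x%llx\n",
	       pages, (unsigned long long)dma_addr);
	return 0;
}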
  1198. /*
  1199. * Does the reverse of the __map_single function. Must be called with
  1200. * the domain lock held too
  1201. */
  1202. static void __unmap_single(struct amd_iommu *iommu,
  1203. struct dma_ops_domain *dma_dom,
  1204. dma_addr_t dma_addr,
  1205. size_t size,
  1206. int dir)
  1207. {
  1208. dma_addr_t i, start;
  1209. unsigned int pages;
  1210. if ((dma_addr == bad_dma_address) ||
  1211. (dma_addr + size > dma_dom->aperture_size))
  1212. return;
  1213. pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
  1214. dma_addr &= PAGE_MASK;
  1215. start = dma_addr;
  1216. for (i = 0; i < pages; ++i) {
  1217. dma_ops_domain_unmap(iommu, dma_dom, start);
  1218. start += PAGE_SIZE;
  1219. }
  1220. SUB_STATS_COUNTER(alloced_io_mem, size);
  1221. dma_ops_free_addresses(dma_dom, dma_addr, pages);
  1222. if (amd_iommu_unmap_flush || dma_dom->need_flush) {
  1223. iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
  1224. dma_dom->need_flush = false;
  1225. }
  1226. }
  1227. /*
  1228. * The exported map_single function for dma_ops.
  1229. */
  1230. static dma_addr_t map_page(struct device *dev, struct page *page,
  1231. unsigned long offset, size_t size,
  1232. enum dma_data_direction dir,
  1233. struct dma_attrs *attrs)
  1234. {
  1235. unsigned long flags;
  1236. struct amd_iommu *iommu;
  1237. struct protection_domain *domain;
  1238. u16 devid;
  1239. dma_addr_t addr;
  1240. u64 dma_mask;
  1241. phys_addr_t paddr = page_to_phys(page) + offset;
  1242. INC_STATS_COUNTER(cnt_map_single);
  1243. if (!check_device(dev))
  1244. return bad_dma_address;
  1245. dma_mask = *dev->dma_mask;
  1246. get_device_resources(dev, &iommu, &domain, &devid);
  1247. if (iommu == NULL || domain == NULL)
  1248. /* device not handled by any AMD IOMMU */
  1249. return (dma_addr_t)paddr;
  1250. if (!dma_ops_domain(domain))
  1251. return bad_dma_address;
  1252. spin_lock_irqsave(&domain->lock, flags);
  1253. addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
  1254. dma_mask);
  1255. if (addr == bad_dma_address)
  1256. goto out;
  1257. iommu_completion_wait(iommu);
  1258. out:
  1259. spin_unlock_irqrestore(&domain->lock, flags);
  1260. return addr;
  1261. }
  1262. /*
  1263. * The exported unmap_single function for dma_ops.
  1264. */
  1265. static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
  1266. enum dma_data_direction dir, struct dma_attrs *attrs)
  1267. {
  1268. unsigned long flags;
  1269. struct amd_iommu *iommu;
  1270. struct protection_domain *domain;
  1271. u16 devid;
  1272. INC_STATS_COUNTER(cnt_unmap_single);
  1273. if (!check_device(dev) ||
  1274. !get_device_resources(dev, &iommu, &domain, &devid))
  1275. /* device not handled by any AMD IOMMU */
  1276. return;
  1277. if (!dma_ops_domain(domain))
  1278. return;
  1279. spin_lock_irqsave(&domain->lock, flags);
  1280. __unmap_single(iommu, domain->priv, dma_addr, size, dir);
  1281. iommu_completion_wait(iommu);
  1282. spin_unlock_irqrestore(&domain->lock, flags);
  1283. }
  1284. /*
  1285. * This is a special map_sg function which is used if we should map a
  1286. * device which is not handled by an AMD IOMMU in the system.
  1287. */
  1288. static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
  1289. int nelems, int dir)
  1290. {
  1291. struct scatterlist *s;
  1292. int i;
  1293. for_each_sg(sglist, s, nelems, i) {
  1294. s->dma_address = (dma_addr_t)sg_phys(s);
  1295. s->dma_length = s->length;
  1296. }
  1297. return nelems;
  1298. }
  1299. /*
  1300. * The exported map_sg function for dma_ops (handles scatter-gather
  1301. * lists).
  1302. */
  1303. static int map_sg(struct device *dev, struct scatterlist *sglist,
  1304. int nelems, enum dma_data_direction dir,
  1305. struct dma_attrs *attrs)
  1306. {
  1307. unsigned long flags;
  1308. struct amd_iommu *iommu;
  1309. struct protection_domain *domain;
  1310. u16 devid;
  1311. int i;
  1312. struct scatterlist *s;
  1313. phys_addr_t paddr;
  1314. int mapped_elems = 0;
  1315. u64 dma_mask;
  1316. INC_STATS_COUNTER(cnt_map_sg);
  1317. if (!check_device(dev))
  1318. return 0;
  1319. dma_mask = *dev->dma_mask;
  1320. get_device_resources(dev, &iommu, &domain, &devid);
  1321. if (!iommu || !domain)
  1322. return map_sg_no_iommu(dev, sglist, nelems, dir);
  1323. if (!dma_ops_domain(domain))
  1324. return 0;
  1325. spin_lock_irqsave(&domain->lock, flags);
  1326. for_each_sg(sglist, s, nelems, i) {
  1327. paddr = sg_phys(s);
  1328. s->dma_address = __map_single(dev, iommu, domain->priv,
  1329. paddr, s->length, dir, false,
  1330. dma_mask);
  1331. if (s->dma_address) {
  1332. s->dma_length = s->length;
  1333. mapped_elems++;
  1334. } else
  1335. goto unmap;
  1336. }
  1337. iommu_completion_wait(iommu);
  1338. out:
  1339. spin_unlock_irqrestore(&domain->lock, flags);
  1340. return mapped_elems;
  1341. unmap:
  1342. for_each_sg(sglist, s, mapped_elems, i) {
  1343. if (s->dma_address)
  1344. __unmap_single(iommu, domain->priv, s->dma_address,
  1345. s->dma_length, dir);
  1346. s->dma_address = s->dma_length = 0;
  1347. }
  1348. mapped_elems = 0;
  1349. goto out;
  1350. }
  1351. /*
  1352. * The exported unmap_sg function for dma_ops (handles scatter-gather
  1353. * lists).
  1354. */
  1355. static void unmap_sg(struct device *dev, struct scatterlist *sglist,
  1356. int nelems, enum dma_data_direction dir,
  1357. struct dma_attrs *attrs)
  1358. {
  1359. unsigned long flags;
  1360. struct amd_iommu *iommu;
  1361. struct protection_domain *domain;
  1362. struct scatterlist *s;
  1363. u16 devid;
  1364. int i;
  1365. INC_STATS_COUNTER(cnt_unmap_sg);
  1366. if (!check_device(dev) ||
  1367. !get_device_resources(dev, &iommu, &domain, &devid))
  1368. return;
  1369. if (!dma_ops_domain(domain))
  1370. return;
  1371. spin_lock_irqsave(&domain->lock, flags);
  1372. for_each_sg(sglist, s, nelems, i) {
  1373. __unmap_single(iommu, domain->priv, s->dma_address,
  1374. s->dma_length, dir);
  1375. s->dma_address = s->dma_length = 0;
  1376. }
  1377. iommu_completion_wait(iommu);
  1378. spin_unlock_irqrestore(&domain->lock, flags);
  1379. }
/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long flags;
	void *virt_addr;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	phys_addr_t paddr;
	u64 dma_mask = dev->coherent_dma_mask;

	INC_STATS_COUNTER(cnt_alloc_coherent);

	if (!check_device(dev))
		return NULL;

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	flag |= __GFP_ZERO;
	virt_addr = (void *)__get_free_pages(flag, get_order(size));
	if (!virt_addr)
		return NULL;

	paddr = virt_to_phys(virt_addr);

	if (!iommu || !domain) {
		*dma_addr = (dma_addr_t)paddr;
		return virt_addr;
	}

	if (!dma_ops_domain(domain))
		goto out_free;

	if (!dma_mask)
		dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL, true, dma_mask);

	if (*dma_addr == bad_dma_address) {
		spin_unlock_irqrestore(&domain->lock, flags);
		goto out_free;
	}

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);

	return virt_addr;

out_free:
	free_pages((unsigned long)virt_addr, get_order(size));

	return NULL;
}
/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	INC_STATS_COUNTER(cnt_free_coherent);

	if (!check_device(dev))
		return;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		goto free_mem;

	if (!dma_ops_domain(domain))
		goto free_mem;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
	free_pages((unsigned long)virt_addr, get_order(size));
}
/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
	u16 bdf;
	struct pci_dev *pcidev;

	/* No device or no PCI device */
	if (!dev || dev->bus != &pci_bus_type)
		return 0;

	pcidev = to_pci_dev(dev);

	bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* Out of our scope? */
	if (bdf > amd_iommu_last_bdf)
		return 0;

	return 1;
}
/*
 * The function for pre-allocating protection domains.
 *
 * Once the driver core informs the DMA layer whenever a driver grabs a
 * device, we will no longer need to preallocate the protection domains.
 * For now we have to.
 */
static void prealloc_protection_domains(void)
{
	struct pci_dev *dev = NULL;
	struct dma_ops_domain *dma_dom;
	struct amd_iommu *iommu;
	u16 devid;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		devid = calc_devid(dev->bus->number, dev->devfn);
		if (devid > amd_iommu_last_bdf)
			continue;
		devid = amd_iommu_alias_table[devid];
		if (domain_for_device(devid))
			continue;
		iommu = amd_iommu_rlookup_table[devid];
		if (!iommu)
			continue;
		dma_dom = dma_ops_domain_alloc(iommu);
		if (!dma_dom)
			continue;
		init_unity_mappings_for_device(dma_dom, devid);
		dma_dom->target_dev = devid;

		list_add_tail(&dma_dom->list, &iommu_pd_list);
	}
}
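/*
 * The dma_map_ops structure which is installed as the global dma_ops in
 * amd_iommu_init_dma_ops() below.
 */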
static struct dma_map_ops amd_iommu_dma_ops = {
	.alloc_coherent = alloc_coherent,
	.free_coherent = free_coherent,
	.map_page = map_page,
	.unmap_page = unmap_page,
	.map_sg = map_sg,
	.unmap_sg = unmap_sg,
	.dma_supported = amd_iommu_dma_supported,
};
/*
 * The function which hooks the AMD IOMMU driver into the dma_ops
 * interface.
 */
int __init amd_iommu_init_dma_ops(void)
{
	struct amd_iommu *iommu;
	int ret;

	/*
	 * first allocate a default protection domain for every IOMMU we
	 * found in the system. Devices not assigned to any other
	 * protection domain will be assigned to the default one.
	 */
	for_each_iommu(iommu) {
		iommu->default_dom = dma_ops_domain_alloc(iommu);
		if (iommu->default_dom == NULL)
			return -ENOMEM;
		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
		ret = iommu_init_unity_mappings(iommu);
		if (ret)
			goto free_domains;
	}

	/*
	 * If device isolation is enabled, pre-allocate the protection
	 * domains for each device.
	 */
	if (amd_iommu_isolate)
		prealloc_protection_domains();

	iommu_detected = 1;
	force_iommu = 1;
	bad_dma_address = 0;
#ifdef CONFIG_GART_IOMMU
	gart_iommu_aperture_disabled = 1;
	gart_iommu_aperture = 0;
#endif

	/* Finally make the driver visible to the DMA layer and the IOMMU API */
	dma_ops = &amd_iommu_dma_ops;

	register_iommu(&amd_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	amd_iommu_stats_init();

	return 0;

free_domains:

	for_each_iommu(iommu) {
		if (iommu->default_dom)
			dma_ops_domain_free(iommu->default_dom);
	}

	return ret;
}
/*****************************************************************************
 *
 * The following functions belong to the exported interface of AMD IOMMU
 *
 * This interface allows access to lower level functions of the IOMMU
 * like protection domain handling and assignment of devices to domains
 * which is not possible with the dma_ops interface.
 *
 *****************************************************************************/
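/*
 * Detach all devices which are still attached to the given protection
 * domain. Used when a domain created through the IOMMU API is destroyed
 * while devices are still bound to it.
 */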
static void cleanup_domain(struct protection_domain *domain)
{
	unsigned long flags;
	u16 devid;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		if (amd_iommu_pd_table[devid] == domain)
			__detach_device(domain, devid);

	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
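/*
 * Allocate and initialize a new protection domain for the generic IOMMU
 * API. The domain starts out with an empty 3-level page table.
 */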
static int amd_iommu_domain_init(struct iommu_domain *dom)
{
	struct protection_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return -ENOMEM;

	spin_lock_init(&domain->lock);
	domain->mode = PAGE_MODE_3_LEVEL;
	domain->id = domain_id_alloc();
	if (!domain->id)
		goto out_free;
	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	if (!domain->pt_root)
		goto out_free;

	dom->priv = domain;

	return 0;

out_free:
	/* release the domain id again if it was already allocated */
	if (domain->id)
		domain_id_free(domain->id);
	kfree(domain);

	return -ENOMEM;
}
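/*
 * Tear down a protection domain allocated through the IOMMU API: detach
 * any remaining devices, free the page table and release the domain id.
 */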
static void amd_iommu_domain_destroy(struct iommu_domain *dom)
{
	struct protection_domain *domain = dom->priv;

	if (!domain)
		return;

	if (domain->dev_cnt > 0)
		cleanup_domain(domain);

	BUG_ON(domain->dev_cnt != 0);

	free_pagetable(domain);

	domain_id_free(domain->id);

	kfree(domain);

	dom->priv = NULL;
}
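/*
 * Detach a PCI device from its protection domain and invalidate its
 * device table entry on the responsible IOMMU.
 */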
static void amd_iommu_detach_device(struct iommu_domain *dom,
				    struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid > 0)
		detach_device(domain, devid);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return;

	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_completion_wait(iommu);
}
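/*
 * Attach a PCI device to a protection domain created through the IOMMU
 * API. If the device is still attached to another domain it is detached
 * from that domain first.
 */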
static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct protection_domain *old_domain;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return -EINVAL;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid >= amd_iommu_last_bdf ||
			devid != amd_iommu_alias_table[devid])
		return -EINVAL;

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return -EINVAL;

	old_domain = domain_for_device(devid);
	if (old_domain)
		detach_device(old_domain, devid);

	attach_device(iommu, domain, devid);

	iommu_completion_wait(iommu);

	return 0;
}
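/*
 * Map a range of physical memory into a protection domain, one page at a
 * time. iommu_prot carries the IOMMU_READ/IOMMU_WRITE flags from the
 * generic IOMMU API.
 */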
static int amd_iommu_map_range(struct iommu_domain *dom,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int iommu_prot)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= IOMMU_PROT_IR;
	if (iommu_prot & IOMMU_WRITE)
		prot |= IOMMU_PROT_IW;

	iova  &= PAGE_MASK;
	paddr &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		ret = iommu_map_page(domain, iova, paddr, prot);
		if (ret)
			return ret;

		iova  += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return 0;
}
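/*
 * Unmap a range of IO virtual addresses from a protection domain and
 * flush the domain's TLB entries afterwards.
 */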
static void amd_iommu_unmap_range(struct iommu_domain *dom,
				  unsigned long iova, size_t size)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);

	iova &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		iommu_unmap_page(domain, iova);
		iova += PAGE_SIZE;
	}

	iommu_flush_domain(domain->id);
}
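/*
 * Walk the 3-level page table of a protection domain and translate an IO
 * virtual address into the physical address it is mapped to. Returns 0
 * if the address is not mapped.
 */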
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  unsigned long iova)
{
	struct protection_domain *domain = dom->priv;
	unsigned long offset = iova & ~PAGE_MASK;
	phys_addr_t paddr;
	u64 *pte;

	pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)];

	if (!IOMMU_PTE_PRESENT(*pte))
		return 0;

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L1_INDEX(iova)];

	if (!IOMMU_PTE_PRESENT(*pte))
		return 0;

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L0_INDEX(iova)];

	if (!IOMMU_PTE_PRESENT(*pte))
		return 0;

	paddr  = *pte & IOMMU_PAGE_MASK;
	paddr |= offset;

	return paddr;
}
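/*
 * Capability query for the generic IOMMU API. No capabilities are
 * advertised yet.
 */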
static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
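/*
 * The iommu_ops structure registered with the generic IOMMU API via
 * register_iommu() in amd_iommu_init_dma_ops() above.
 */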
static struct iommu_ops amd_iommu_ops = {
	.domain_init = amd_iommu_domain_init,
	.domain_destroy = amd_iommu_domain_destroy,
	.attach_dev = amd_iommu_attach_device,
	.detach_dev = amd_iommu_detach_device,
	.map = amd_iommu_map_range,
	.unmap = amd_iommu_unmap_range,
	.iova_to_phys = amd_iommu_iova_to_phys,
	.domain_has_cap = amd_iommu_domain_has_cap,
};