amd_iommu.c

  1. /*
  2. * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
  3. * Author: Joerg Roedel <joerg.roedel@amd.com>
  4. * Leo Duran <leo.duran@amd.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published
  8. * by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/pci.h>
  20. #include <linux/gfp.h>
  21. #include <linux/bitops.h>
  22. #include <linux/debugfs.h>
  23. #include <linux/scatterlist.h>
  24. #include <linux/dma-mapping.h>
  25. #include <linux/iommu-helper.h>
  26. #include <linux/iommu.h>
  27. #include <asm/proto.h>
  28. #include <asm/iommu.h>
  29. #include <asm/gart.h>
  30. #include <asm/amd_iommu_types.h>
  31. #include <asm/amd_iommu.h>
  32. #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
  33. #define EXIT_LOOP_COUNT 10000000
  34. static DEFINE_RWLOCK(amd_iommu_devtable_lock);
  35. /* A list of preallocated protection domains */
  36. static LIST_HEAD(iommu_pd_list);
  37. static DEFINE_SPINLOCK(iommu_pd_list_lock);
  38. #ifdef CONFIG_IOMMU_API
  39. static struct iommu_ops amd_iommu_ops;
  40. #endif
  41. /*
  42. * general struct to manage commands sent to an IOMMU
  43. */
  44. struct iommu_cmd {
  45. u32 data[4];
  46. };
  47. static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
  48. struct unity_map_entry *e);
  49. static struct dma_ops_domain *find_protection_domain(u16 devid);
  50. static u64* alloc_pte(struct protection_domain *dom,
  51. unsigned long address,
  52. u64 **pte_page, gfp_t gfp);
  53. static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
  54. unsigned long start_page,
  55. unsigned int pages);
  56. static u64 *fetch_pte(struct protection_domain *domain,
  57. unsigned long address);
  58. #ifndef BUS_NOTIFY_UNBOUND_DRIVER
  59. #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
  60. #endif
  61. #ifdef CONFIG_AMD_IOMMU_STATS
  62. /*
  63. * Initialization code for statistics collection
  64. */
  65. DECLARE_STATS_COUNTER(compl_wait);
  66. DECLARE_STATS_COUNTER(cnt_map_single);
  67. DECLARE_STATS_COUNTER(cnt_unmap_single);
  68. DECLARE_STATS_COUNTER(cnt_map_sg);
  69. DECLARE_STATS_COUNTER(cnt_unmap_sg);
  70. DECLARE_STATS_COUNTER(cnt_alloc_coherent);
  71. DECLARE_STATS_COUNTER(cnt_free_coherent);
  72. DECLARE_STATS_COUNTER(cross_page);
  73. DECLARE_STATS_COUNTER(domain_flush_single);
  74. DECLARE_STATS_COUNTER(domain_flush_all);
  75. DECLARE_STATS_COUNTER(alloced_io_mem);
  76. DECLARE_STATS_COUNTER(total_map_requests);
  77. static struct dentry *stats_dir;
  78. static struct dentry *de_isolate;
  79. static struct dentry *de_fflush;
  80. static void amd_iommu_stats_add(struct __iommu_counter *cnt)
  81. {
  82. if (stats_dir == NULL)
  83. return;
  84. cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
  85. &cnt->value);
  86. }
  87. static void amd_iommu_stats_init(void)
  88. {
  89. stats_dir = debugfs_create_dir("amd-iommu", NULL);
  90. if (stats_dir == NULL)
  91. return;
  92. de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
  93. (u32 *)&amd_iommu_isolate);
  94. de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
  95. (u32 *)&amd_iommu_unmap_flush);
  96. amd_iommu_stats_add(&compl_wait);
  97. amd_iommu_stats_add(&cnt_map_single);
  98. amd_iommu_stats_add(&cnt_unmap_single);
  99. amd_iommu_stats_add(&cnt_map_sg);
  100. amd_iommu_stats_add(&cnt_unmap_sg);
  101. amd_iommu_stats_add(&cnt_alloc_coherent);
  102. amd_iommu_stats_add(&cnt_free_coherent);
  103. amd_iommu_stats_add(&cross_page);
  104. amd_iommu_stats_add(&domain_flush_single);
  105. amd_iommu_stats_add(&domain_flush_all);
  106. amd_iommu_stats_add(&alloced_io_mem);
  107. amd_iommu_stats_add(&total_map_requests);
  108. }
  109. #endif
  110. /* returns !0 if the IOMMU is caching non-present entries in its TLB */
  111. static int iommu_has_npcache(struct amd_iommu *iommu)
  112. {
  113. return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
  114. }
  115. /****************************************************************************
  116. *
  117. * Interrupt handling functions
  118. *
  119. ****************************************************************************/
  120. static void iommu_print_event(void *__evt)
  121. {
  122. u32 *event = __evt;
  123. int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
  124. int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
  125. int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
  126. int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
  127. u64 address = (u64)(((u64)event[3]) << 32) | event[2];
  128. printk(KERN_ERR "AMD IOMMU: Event logged [");
  129. switch (type) {
  130. case EVENT_TYPE_ILL_DEV:
  131. printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
  132. "address=0x%016llx flags=0x%04x]\n",
  133. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  134. address, flags);
  135. break;
  136. case EVENT_TYPE_IO_FAULT:
  137. printk("IO_PAGE_FAULT device=%02x:%02x.%x "
  138. "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
  139. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  140. domid, address, flags);
  141. break;
  142. case EVENT_TYPE_DEV_TAB_ERR:
  143. printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
  144. "address=0x%016llx flags=0x%04x]\n",
  145. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  146. address, flags);
  147. break;
  148. case EVENT_TYPE_PAGE_TAB_ERR:
  149. printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
  150. "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
  151. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  152. domid, address, flags);
  153. break;
  154. case EVENT_TYPE_ILL_CMD:
  155. printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
  156. break;
  157. case EVENT_TYPE_CMD_HARD_ERR:
  158. printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
  159. "flags=0x%04x]\n", address, flags);
  160. break;
  161. case EVENT_TYPE_IOTLB_INV_TO:
  162. printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
  163. "address=0x%016llx]\n",
  164. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  165. address);
  166. break;
  167. case EVENT_TYPE_INV_DEV_REQ:
  168. printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
  169. "address=0x%016llx flags=0x%04x]\n",
  170. PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
  171. address, flags);
  172. break;
  173. default:
  174. printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
  175. }
  176. }
  177. static void iommu_poll_events(struct amd_iommu *iommu)
  178. {
  179. u32 head, tail;
  180. unsigned long flags;
  181. spin_lock_irqsave(&iommu->lock, flags);
  182. head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
  183. tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
  184. while (head != tail) {
  185. iommu_print_event(iommu->evt_buf + head);
  186. head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
  187. }
  188. writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
  189. spin_unlock_irqrestore(&iommu->lock, flags);
  190. }
  191. irqreturn_t amd_iommu_int_handler(int irq, void *data)
  192. {
  193. struct amd_iommu *iommu;
  194. for_each_iommu(iommu)
  195. iommu_poll_events(iommu);
  196. return IRQ_HANDLED;
  197. }
  198. /****************************************************************************
  199. *
  200. * IOMMU command queuing functions
  201. *
  202. ****************************************************************************/
  203. /*
  204. * Writes the command to the IOMMU's command buffer and informs the
  205. * hardware about the new command. Must be called with iommu->lock held.
  206. */
  207. static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
  208. {
  209. u32 tail, head;
  210. u8 *target;
  211. tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
  212. target = iommu->cmd_buf + tail;
  213. memcpy_toio(target, cmd, sizeof(*cmd));
  214. tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
  215. head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
  216. if (tail == head)
  217. return -ENOMEM;
  218. writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
  219. return 0;
  220. }
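/*
 * Note on the ring buffer handling above: the command is copied into the
 * slot at the current tail first, then the tail is advanced modulo the
 * buffer size and compared against the head pointer.  If the two collide
 * the buffer is full - the tail register is left untouched (so the
 * hardware never sees the new entry) and -ENOMEM is returned.
 */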
  221. /*
  222. * General queuing function for commands. Takes iommu->lock and calls
  223. * __iommu_queue_command().
  224. */
  225. static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
  226. {
  227. unsigned long flags;
  228. int ret;
  229. spin_lock_irqsave(&iommu->lock, flags);
  230. ret = __iommu_queue_command(iommu, cmd);
  231. if (!ret)
  232. iommu->need_sync = true;
  233. spin_unlock_irqrestore(&iommu->lock, flags);
  234. return ret;
  235. }
  236. /*
  237. * This function waits until an IOMMU has completed a completion
  238. * wait command
  239. */
  240. static void __iommu_wait_for_completion(struct amd_iommu *iommu)
  241. {
  242. int ready = 0;
  243. unsigned status = 0;
  244. unsigned long i = 0;
  245. INC_STATS_COUNTER(compl_wait);
  246. while (!ready && (i < EXIT_LOOP_COUNT)) {
  247. ++i;
  248. /* wait for the bit to become one */
  249. status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
  250. ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
  251. }
  252. /* set bit back to zero */
  253. status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
  254. writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
  255. if (unlikely(i == EXIT_LOOP_COUNT))
  256. panic("AMD IOMMU: Completion wait loop failed\n");
  257. }
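/*
 * The loop above busy-polls the MMIO status register until the
 * completion-wait interrupt bit is set, giving up (and panicking) after
 * EXIT_LOOP_COUNT iterations.  The bit is cleared again by writing the
 * status value back.
 */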
  258. /*
  259. * This function queues a completion wait command into the command
  260. * buffer of an IOMMU
  261. */
  262. static int __iommu_completion_wait(struct amd_iommu *iommu)
  263. {
  264. struct iommu_cmd cmd;
  265. memset(&cmd, 0, sizeof(cmd));
  266. cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
  267. CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
  268. return __iommu_queue_command(iommu, &cmd);
  269. }
  270. /*
  271. * This function is called whenever we need to ensure that the IOMMU has
  272. * completed execution of all commands we sent. It sends a
  273. * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
  274. * us about that by writing a value to a physical address we pass with
  275. * the command.
  276. */
  277. static int iommu_completion_wait(struct amd_iommu *iommu)
  278. {
  279. int ret = 0;
  280. unsigned long flags;
  281. spin_lock_irqsave(&iommu->lock, flags);
  282. if (!iommu->need_sync)
  283. goto out;
  284. ret = __iommu_completion_wait(iommu);
  285. iommu->need_sync = false;
  286. if (ret)
  287. goto out;
  288. __iommu_wait_for_completion(iommu);
  289. out:
  290. spin_unlock_irqrestore(&iommu->lock, flags);
  291. return 0;
  292. }
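/*
 * need_sync is set by iommu_queue_command() whenever a command was
 * successfully queued and cleared here, so a completion wait is only
 * issued when there is actually something outstanding to wait for.
 */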
  293. /*
  294. * Command send function for invalidating a device table entry
  295. */
  296. static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
  297. {
  298. struct iommu_cmd cmd;
  299. int ret;
  300. BUG_ON(iommu == NULL);
  301. memset(&cmd, 0, sizeof(cmd));
  302. CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
  303. cmd.data[0] = devid;
  304. ret = iommu_queue_command(iommu, &cmd);
  305. return ret;
  306. }
  307. static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
  308. u16 domid, int pde, int s)
  309. {
  310. memset(cmd, 0, sizeof(*cmd));
  311. address &= PAGE_MASK;
  312. CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
  313. cmd->data[1] |= domid;
  314. cmd->data[2] = lower_32_bits(address);
  315. cmd->data[3] = upper_32_bits(address);
  316. if (s) /* size bit - we flush more than one 4kb page */
  317. cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
  318. if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
  319. cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
  320. }
  321. /*
  322. * Generic command send function for invalidating TLB entries
  323. */
  324. static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
  325. u64 address, u16 domid, int pde, int s)
  326. {
  327. struct iommu_cmd cmd;
  328. int ret;
  329. __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s);
  330. ret = iommu_queue_command(iommu, &cmd);
  331. return ret;
  332. }
  333. /*
  334. * TLB invalidation function which is called from the mapping functions.
  335. * It invalidates a single PTE if the range to flush is within a single
  336. * page. Otherwise it flushes the whole TLB of the IOMMU.
  337. */
  338. static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
  339. u64 address, size_t size)
  340. {
  341. int s = 0;
  342. unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
  343. address &= PAGE_MASK;
  344. if (pages > 1) {
  345. /*
  346. * If we have to flush more than one page, flush all
  347. * TLB entries for this domain
  348. */
  349. address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
  350. s = 1;
  351. }
  352. iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);
  353. return 0;
  354. }
  355. /* Flush the whole IO/TLB for a given protection domain */
  356. static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
  357. {
  358. u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
  359. INC_STATS_COUNTER(domain_flush_single);
  360. iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
  361. }
  362. /* Flush the whole IO/TLB for a given protection domain - including PDE */
  363. static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
  364. {
  365. u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
  366. INC_STATS_COUNTER(domain_flush_single);
  367. iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
  368. }
  369. /*
  370. * This function is used to flush the IO/TLB for a given protection domain
  371. * on every IOMMU in the system
  372. */
  373. static void iommu_flush_domain(u16 domid)
  374. {
  375. unsigned long flags;
  376. struct amd_iommu *iommu;
  377. struct iommu_cmd cmd;
  378. INC_STATS_COUNTER(domain_flush_all);
  379. __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
  380. domid, 1, 1);
  381. for_each_iommu(iommu) {
  382. spin_lock_irqsave(&iommu->lock, flags);
  383. __iommu_queue_command(iommu, &cmd);
  384. __iommu_completion_wait(iommu);
  385. __iommu_wait_for_completion(iommu);
  386. spin_unlock_irqrestore(&iommu->lock, flags);
  387. }
  388. }
  389. void amd_iommu_flush_all_domains(void)
  390. {
  391. int i;
  392. for (i = 1; i < MAX_DOMAIN_ID; ++i) {
  393. if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
  394. continue;
  395. iommu_flush_domain(i);
  396. }
  397. }
  398. static void flush_devices_by_domain(struct protection_domain *domain)
  399. {
  400. struct amd_iommu *iommu;
  401. int i;
  402. for (i = 0; i <= amd_iommu_last_bdf; ++i) {
  403. if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
  404. (amd_iommu_pd_table[i] != domain))
  405. continue;
  406. iommu = amd_iommu_rlookup_table[i];
  407. if (!iommu)
  408. continue;
  409. iommu_queue_inv_dev_entry(iommu, i);
  410. iommu_completion_wait(iommu);
  411. }
  412. }
  413. void amd_iommu_flush_all_devices(void)
  414. {
  415. flush_devices_by_domain(NULL);
  416. }
  417. /****************************************************************************
  418. *
  419. * The functions below are used to create the page table mappings for
  420. * unity mapped regions.
  421. *
  422. ****************************************************************************/
  423. /*
  424. * Generic mapping function. It maps a physical address into a DMA
  425. * address space. It allocates the page table pages if necessary.
  426. * In the future it can be extended to a generic mapping function
  427. * supporting all features of AMD IOMMU page tables like level skipping
  428. * and full 64 bit address spaces.
  429. */
  430. static int iommu_map_page(struct protection_domain *dom,
  431. unsigned long bus_addr,
  432. unsigned long phys_addr,
  433. int prot)
  434. {
  435. u64 __pte, *pte;
  436. bus_addr = PAGE_ALIGN(bus_addr);
  437. phys_addr = PAGE_ALIGN(phys_addr);
  438. /* only support 512GB address spaces for now */
  439. if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
  440. return -EINVAL;
  441. pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL);
  442. if (IOMMU_PTE_PRESENT(*pte))
  443. return -EBUSY;
  444. __pte = phys_addr | IOMMU_PTE_P;
  445. if (prot & IOMMU_PROT_IR)
  446. __pte |= IOMMU_PTE_IR;
  447. if (prot & IOMMU_PROT_IW)
  448. __pte |= IOMMU_PTE_IW;
  449. *pte = __pte;
  450. return 0;
  451. }
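/* Clears the PTE for a single page in the domain's page table, if one exists */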
  452. static void iommu_unmap_page(struct protection_domain *dom,
  453. unsigned long bus_addr)
  454. {
  455. u64 *pte = fetch_pte(dom, bus_addr);
  456. if (pte)
  457. *pte = 0;
  458. }
  459. /*
  460. * This function checks if a specific unity mapping entry is needed for
  461. * this specific IOMMU.
  462. */
  463. static int iommu_for_unity_map(struct amd_iommu *iommu,
  464. struct unity_map_entry *entry)
  465. {
  466. u16 bdf, i;
  467. for (i = entry->devid_start; i <= entry->devid_end; ++i) {
  468. bdf = amd_iommu_alias_table[i];
  469. if (amd_iommu_rlookup_table[bdf] == iommu)
  470. return 1;
  471. }
  472. return 0;
  473. }
  474. /*
  475. * Init the unity mappings for a specific IOMMU in the system
  476. *
  477. * Basically iterates over all unity mapping entries and applies them to
  478. * the default DMA domain of that IOMMU if necessary.
  479. */
  480. static int iommu_init_unity_mappings(struct amd_iommu *iommu)
  481. {
  482. struct unity_map_entry *entry;
  483. int ret;
  484. list_for_each_entry(entry, &amd_iommu_unity_map, list) {
  485. if (!iommu_for_unity_map(iommu, entry))
  486. continue;
  487. ret = dma_ops_unity_map(iommu->default_dom, entry);
  488. if (ret)
  489. return ret;
  490. }
  491. return 0;
  492. }
  493. /*
  494. * This function actually applies the mapping to the page table of the
  495. * dma_ops domain.
  496. */
  497. static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
  498. struct unity_map_entry *e)
  499. {
  500. u64 addr;
  501. int ret;
  502. for (addr = e->address_start; addr < e->address_end;
  503. addr += PAGE_SIZE) {
  504. ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot);
  505. if (ret)
  506. return ret;
  507. /*
  508. * if unity mapping is in aperture range mark the page
  509. * as allocated in the aperture
  510. */
  511. if (addr < dma_dom->aperture_size)
  512. __set_bit(addr >> PAGE_SHIFT,
  513. dma_dom->aperture[0]->bitmap);
  514. }
  515. return 0;
  516. }
  517. /*
  518. * Inits the unity mappings required for a specific device
  519. */
  520. static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  521. u16 devid)
  522. {
  523. struct unity_map_entry *e;
  524. int ret;
  525. list_for_each_entry(e, &amd_iommu_unity_map, list) {
  526. if (!(devid >= e->devid_start && devid <= e->devid_end))
  527. continue;
  528. ret = dma_ops_unity_map(dma_dom, e);
  529. if (ret)
  530. return ret;
  531. }
  532. return 0;
  533. }
  534. /****************************************************************************
  535. *
  536. * The next functions belong to the address allocator for the dma_ops
  537. * interface functions. They work like the allocators in the other IOMMU
  538. * drivers. It is basically a bitmap which marks the allocated pages in
  539. * the aperture. Maybe it could be enhanced in the future to a more
  540. * efficient allocator.
  541. *
  542. ****************************************************************************/
  543. /*
  544. * The address allocator core functions.
  545. *
  546. * called with domain->lock held
  547. */
  548. /*
  549. * This function checks if there is a PTE for a given dma address. If
  550. * there is one, it returns the pointer to it.
  551. */
  552. static u64 *fetch_pte(struct protection_domain *domain,
  553. unsigned long address)
  554. {
  555. int level;
  556. u64 *pte;
  557. level = domain->mode - 1;
  558. pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
  559. while (level > 0) {
  560. if (!IOMMU_PTE_PRESENT(*pte))
  561. return NULL;
  562. level -= 1;
  563. pte = IOMMU_PTE_PAGE(*pte);
  564. pte = &pte[PM_LEVEL_INDEX(level, address)];
  565. }
  566. return pte;
  567. }
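/*
 * The walk above starts at the page table root, picks the entry for the
 * domain's top level via PM_LEVEL_INDEX() and descends one level per
 * iteration until the level-0 (4kb) entry is reached.  NULL is returned
 * as soon as a non-present entry is found on the way down.
 */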
  568. /*
  569. * This function is used to add a new aperture range to an existing
  570. * aperture in case of dma_ops domain allocation or address allocation
  571. * failure.
  572. */
  573. static int alloc_new_range(struct amd_iommu *iommu,
  574. struct dma_ops_domain *dma_dom,
  575. bool populate, gfp_t gfp)
  576. {
  577. int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
  578. int i;
  579. #ifdef CONFIG_IOMMU_STRESS
  580. populate = false;
  581. #endif
  582. if (index >= APERTURE_MAX_RANGES)
  583. return -ENOMEM;
  584. dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
  585. if (!dma_dom->aperture[index])
  586. return -ENOMEM;
  587. dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
  588. if (!dma_dom->aperture[index]->bitmap)
  589. goto out_free;
  590. dma_dom->aperture[index]->offset = dma_dom->aperture_size;
  591. if (populate) {
  592. unsigned long address = dma_dom->aperture_size;
  593. int i, num_ptes = APERTURE_RANGE_PAGES / 512;
  594. u64 *pte, *pte_page;
  595. for (i = 0; i < num_ptes; ++i) {
  596. pte = alloc_pte(&dma_dom->domain, address,
  597. &pte_page, gfp);
  598. if (!pte)
  599. goto out_free;
  600. dma_dom->aperture[index]->pte_pages[i] = pte_page;
  601. address += APERTURE_RANGE_SIZE / 64;
  602. }
  603. }
  604. dma_dom->aperture_size += APERTURE_RANGE_SIZE;
  605. /* Initialize the exclusion range if necessary */
  606. if (iommu->exclusion_start &&
  607. iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
  608. iommu->exclusion_start < dma_dom->aperture_size) {
  609. unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
  610. int pages = iommu_num_pages(iommu->exclusion_start,
  611. iommu->exclusion_length,
  612. PAGE_SIZE);
  613. dma_ops_reserve_addresses(dma_dom, startpage, pages);
  614. }
  615. /*
  616. * Check for areas already mapped as present in the new aperture
  617. * range and mark those pages as reserved in the allocator. Such
  618. * mappings may already exist as a result of requested unity
  619. * mappings for devices.
  620. */
  621. for (i = dma_dom->aperture[index]->offset;
  622. i < dma_dom->aperture_size;
  623. i += PAGE_SIZE) {
  624. u64 *pte = fetch_pte(&dma_dom->domain, i);
  625. if (!pte || !IOMMU_PTE_PRESENT(*pte))
  626. continue;
  627. dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
  628. }
  629. return 0;
  630. out_free:
  631. free_page((unsigned long)dma_dom->aperture[index]->bitmap);
  632. kfree(dma_dom->aperture[index]);
  633. dma_dom->aperture[index] = NULL;
  634. return -ENOMEM;
  635. }
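/*
 * Scan the aperture ranges, beginning with the range that contains
 * 'start', for a free block of 'pages' pages.  Each range's bitmap is
 * searched with iommu_area_alloc(), honouring the device's segment
 * boundary, the DMA mask and the requested alignment.  On success the
 * DMA address is returned and dom->next_address is advanced past the
 * allocation, otherwise -1 is returned.
 */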
  636. static unsigned long dma_ops_area_alloc(struct device *dev,
  637. struct dma_ops_domain *dom,
  638. unsigned int pages,
  639. unsigned long align_mask,
  640. u64 dma_mask,
  641. unsigned long start)
  642. {
  643. unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
  644. int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
  645. int i = start >> APERTURE_RANGE_SHIFT;
  646. unsigned long boundary_size;
  647. unsigned long address = -1;
  648. unsigned long limit;
  649. next_bit >>= PAGE_SHIFT;
  650. boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
  651. PAGE_SIZE) >> PAGE_SHIFT;
  652. for (;i < max_index; ++i) {
  653. unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
  654. if (dom->aperture[i]->offset >= dma_mask)
  655. break;
  656. limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
  657. dma_mask >> PAGE_SHIFT);
  658. address = iommu_area_alloc(dom->aperture[i]->bitmap,
  659. limit, next_bit, pages, 0,
  660. boundary_size, align_mask);
  661. if (address != -1) {
  662. address = dom->aperture[i]->offset +
  663. (address << PAGE_SHIFT);
  664. dom->next_address = address + (pages << PAGE_SHIFT);
  665. break;
  666. }
  667. next_bit = 0;
  668. }
  669. return address;
  670. }
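/*
 * Wrapper around dma_ops_area_alloc(): first try from dom->next_address
 * onwards, then wrap around and retry from the start of the aperture,
 * setting need_flush so that stale IO/TLB entries are invalidated before
 * addresses get reused.  Returns bad_dma_address if both attempts fail.
 */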
  671. static unsigned long dma_ops_alloc_addresses(struct device *dev,
  672. struct dma_ops_domain *dom,
  673. unsigned int pages,
  674. unsigned long align_mask,
  675. u64 dma_mask)
  676. {
  677. unsigned long address;
  678. #ifdef CONFIG_IOMMU_STRESS
  679. dom->next_address = 0;
  680. dom->need_flush = true;
  681. #endif
  682. address = dma_ops_area_alloc(dev, dom, pages, align_mask,
  683. dma_mask, dom->next_address);
  684. if (address == -1) {
  685. dom->next_address = 0;
  686. address = dma_ops_area_alloc(dev, dom, pages, align_mask,
  687. dma_mask, 0);
  688. dom->need_flush = true;
  689. }
  690. if (unlikely(address == -1))
  691. address = bad_dma_address;
  692. WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
  693. return address;
  694. }
  695. /*
  696. * The address free function.
  697. *
  698. * called with domain->lock held
  699. */
  700. static void dma_ops_free_addresses(struct dma_ops_domain *dom,
  701. unsigned long address,
  702. unsigned int pages)
  703. {
  704. unsigned i = address >> APERTURE_RANGE_SHIFT;
  705. struct aperture_range *range = dom->aperture[i];
  706. BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
  707. #ifdef CONFIG_IOMMU_STRESS
  708. if (i < 4)
  709. return;
  710. #endif
  711. if (address >= dom->next_address)
  712. dom->need_flush = true;
  713. address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
  714. iommu_area_free(range->bitmap, address, pages);
  715. }
  716. /****************************************************************************
  717. *
  718. * The next functions belong to the domain allocation. A domain is
  719. * allocated for every IOMMU as the default domain. If device isolation
  720. * is enabled, every device gets its own domain. The most important thing
  721. * about domains is the page table mapping the DMA address space they
  722. * contain.
  723. *
  724. ****************************************************************************/
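/*
 * Allocate a protection domain id from amd_iommu_pd_alloc_bitmap.
 * Domain id 0 is never handed out (note the BUG_ON), so a return value
 * of 0 signals that the allocation failed.
 */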
  725. static u16 domain_id_alloc(void)
  726. {
  727. unsigned long flags;
  728. int id;
  729. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  730. id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
  731. BUG_ON(id == 0);
  732. if (id > 0 && id < MAX_DOMAIN_ID)
  733. __set_bit(id, amd_iommu_pd_alloc_bitmap);
  734. else
  735. id = 0;
  736. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  737. return id;
  738. }
  739. static void domain_id_free(int id)
  740. {
  741. unsigned long flags;
  742. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  743. if (id > 0 && id < MAX_DOMAIN_ID)
  744. __clear_bit(id, amd_iommu_pd_alloc_bitmap);
  745. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  746. }
  747. /*
  748. * Used to reserve address ranges in the aperture (e.g. for exclusion
  749. * ranges).
  750. */
  751. static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
  752. unsigned long start_page,
  753. unsigned int pages)
  754. {
  755. unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
  756. if (start_page + pages > last_page)
  757. pages = last_page - start_page;
  758. for (i = start_page; i < start_page + pages; ++i) {
  759. int index = i / APERTURE_RANGE_PAGES;
  760. int page = i % APERTURE_RANGE_PAGES;
  761. __set_bit(page, dom->aperture[index]->bitmap);
  762. }
  763. }
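/*
 * Free a complete three level page table: every present entry in the
 * root page is followed to its second level page, every present entry
 * there has its third level page freed, then the second level pages and
 * finally the root page itself are freed.
 */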
  764. static void free_pagetable(struct protection_domain *domain)
  765. {
  766. int i, j;
  767. u64 *p1, *p2, *p3;
  768. p1 = domain->pt_root;
  769. if (!p1)
  770. return;
  771. for (i = 0; i < 512; ++i) {
  772. if (!IOMMU_PTE_PRESENT(p1[i]))
  773. continue;
  774. p2 = IOMMU_PTE_PAGE(p1[i]);
  775. for (j = 0; j < 512; ++j) {
  776. if (!IOMMU_PTE_PRESENT(p2[j]))
  777. continue;
  778. p3 = IOMMU_PTE_PAGE(p2[j]);
  779. free_page((unsigned long)p3);
  780. }
  781. free_page((unsigned long)p2);
  782. }
  783. free_page((unsigned long)p1);
  784. domain->pt_root = NULL;
  785. }
  786. /*
  787. * Free a domain, only used if something went wrong in the
  788. * allocation path and we need to free an already allocated page table
  789. */
  790. static void dma_ops_domain_free(struct dma_ops_domain *dom)
  791. {
  792. int i;
  793. if (!dom)
  794. return;
  795. free_pagetable(&dom->domain);
  796. for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
  797. if (!dom->aperture[i])
  798. continue;
  799. free_page((unsigned long)dom->aperture[i]->bitmap);
  800. kfree(dom->aperture[i]);
  801. }
  802. kfree(dom);
  803. }
  804. /*
  805. * Allocates a new protection domain usable for the dma_ops functions.
  806. * It also initializes the page table and the address allocator data
  807. * structures required for the dma_ops interface
  808. */
  809. static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
  810. {
  811. struct dma_ops_domain *dma_dom;
  812. dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
  813. if (!dma_dom)
  814. return NULL;
  815. spin_lock_init(&dma_dom->domain.lock);
  816. dma_dom->domain.id = domain_id_alloc();
  817. if (dma_dom->domain.id == 0)
  818. goto free_dma_dom;
  819. dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
  820. dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
  821. dma_dom->domain.flags = PD_DMA_OPS_MASK;
  822. dma_dom->domain.priv = dma_dom;
  823. if (!dma_dom->domain.pt_root)
  824. goto free_dma_dom;
  825. dma_dom->need_flush = false;
  826. dma_dom->target_dev = 0xffff;
  827. if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
  828. goto free_dma_dom;
  829. /*
  830. * mark the first page as allocated so we never return 0 as
  831. * a valid dma-address. So we can use 0 as error value
  832. */
  833. dma_dom->aperture[0]->bitmap[0] = 1;
  834. dma_dom->next_address = 0;
  835. return dma_dom;
  836. free_dma_dom:
  837. dma_ops_domain_free(dma_dom);
  838. return NULL;
  839. }
  840. /*
  841. * little helper function to check whether a given protection domain is a
  842. * dma_ops domain
  843. */
  844. static bool dma_ops_domain(struct protection_domain *domain)
  845. {
  846. return domain->flags & PD_DMA_OPS_MASK;
  847. }
  848. /*
  849. * Find out the protection domain structure for a given PCI device. This
  850. * will give us the pointer to the page table root for example.
  851. */
  852. static struct protection_domain *domain_for_device(u16 devid)
  853. {
  854. struct protection_domain *dom;
  855. unsigned long flags;
  856. read_lock_irqsave(&amd_iommu_devtable_lock, flags);
  857. dom = amd_iommu_pd_table[devid];
  858. read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  859. return dom;
  860. }
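/*
 * Write the device table entry for 'devid': data[0] and data[1] hold the
 * physical address of the page table root together with the mode, TV,
 * IR, IW and present bits, data[2] holds the domain id.  The
 * devid -> domain association is also recorded in amd_iommu_pd_table,
 * all under amd_iommu_devtable_lock.
 */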
  861. static void set_dte_entry(u16 devid, struct protection_domain *domain)
  862. {
  863. u64 pte_root = virt_to_phys(domain->pt_root);
  864. unsigned long flags;
  865. pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
  866. << DEV_ENTRY_MODE_SHIFT;
  867. pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
  868. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  869. amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
  870. amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
  871. amd_iommu_dev_table[devid].data[2] = domain->id;
  872. amd_iommu_pd_table[devid] = domain;
  873. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  874. }
  875. /*
  876. * If a device is not yet associated with a domain, this function
  877. * assigns it to the domain and makes that assignment visible to the hardware
  878. */
  879. static void attach_device(struct amd_iommu *iommu,
  880. struct protection_domain *domain,
  881. u16 devid)
  882. {
  883. /* set the DTE entry */
  884. set_dte_entry(devid, domain);
  885. /* increase reference counter */
  886. domain->dev_cnt += 1;
  887. /*
  888. * We might boot into a crash-kernel here. The crashed kernel
  889. * left the caches in the IOMMU dirty. So we have to flush
  890. * here to evict all dirty stuff.
  891. */
  892. iommu_queue_inv_dev_entry(iommu, devid);
  893. iommu_flush_tlb_pde(iommu, domain->id);
  894. }
  895. /*
  896. * Removes a device from a protection domain (unlocked)
  897. */
  898. static void __detach_device(struct protection_domain *domain, u16 devid)
  899. {
  900. /* lock domain */
  901. spin_lock(&domain->lock);
  902. /* remove domain from the lookup table */
  903. amd_iommu_pd_table[devid] = NULL;
  904. /* remove entry from the device table seen by the hardware */
  905. amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
  906. amd_iommu_dev_table[devid].data[1] = 0;
  907. amd_iommu_dev_table[devid].data[2] = 0;
  908. /* decrease reference counter */
  909. domain->dev_cnt -= 1;
  910. /* ready */
  911. spin_unlock(&domain->lock);
  912. }
  913. /*
  914. * Removes a device from a protection domain (with devtable_lock held)
  915. */
  916. static void detach_device(struct protection_domain *domain, u16 devid)
  917. {
  918. unsigned long flags;
  919. /* lock device table */
  920. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  921. __detach_device(domain, devid);
  922. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  923. }
  924. static int device_change_notifier(struct notifier_block *nb,
  925. unsigned long action, void *data)
  926. {
  927. struct device *dev = data;
  928. struct pci_dev *pdev = to_pci_dev(dev);
  929. u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
  930. struct protection_domain *domain;
  931. struct dma_ops_domain *dma_domain;
  932. struct amd_iommu *iommu;
  933. unsigned long flags;
  934. if (devid > amd_iommu_last_bdf)
  935. goto out;
  936. devid = amd_iommu_alias_table[devid];
  937. iommu = amd_iommu_rlookup_table[devid];
  938. if (iommu == NULL)
  939. goto out;
  940. domain = domain_for_device(devid);
  941. if (domain && !dma_ops_domain(domain))
  942. WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
  943. "to a non-dma-ops domain\n", dev_name(dev));
  944. switch (action) {
  945. case BUS_NOTIFY_UNBOUND_DRIVER:
  946. if (!domain)
  947. goto out;
  948. detach_device(domain, devid);
  949. break;
  950. case BUS_NOTIFY_ADD_DEVICE:
  951. /* allocate a protection domain if a device is added */
  952. dma_domain = find_protection_domain(devid);
  953. if (dma_domain)
  954. goto out;
  955. dma_domain = dma_ops_domain_alloc(iommu);
  956. if (!dma_domain)
  957. goto out;
  958. dma_domain->target_dev = devid;
  959. spin_lock_irqsave(&iommu_pd_list_lock, flags);
  960. list_add_tail(&dma_domain->list, &iommu_pd_list);
  961. spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
  962. break;
  963. default:
  964. goto out;
  965. }
  966. iommu_queue_inv_dev_entry(iommu, devid);
  967. iommu_completion_wait(iommu);
  968. out:
  969. return 0;
  970. }
  971. static struct notifier_block device_nb = {
  972. .notifier_call = device_change_notifier,
  973. };
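/*
 * The notifier block above is registered against the PCI bus later in
 * this file (outside this excerpt), roughly:
 *
 *	bus_register_notifier(&pci_bus_type, &device_nb);
 *
 * so that hot-added devices get a preallocated protection domain and
 * devices whose driver is unbound are detached from their domain again.
 */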
  974. /*****************************************************************************
  975. *
  976. * The next functions belong to the dma_ops mapping/unmapping code.
  977. *
  978. *****************************************************************************/
  979. /*
  980. * This function checks if the driver got a valid device from the caller to
  981. * avoid dereferencing invalid pointers.
  982. */
  983. static bool check_device(struct device *dev)
  984. {
  985. if (!dev || !dev->dma_mask)
  986. return false;
  987. return true;
  988. }
  989. /*
  990. * In this function the list of preallocated protection domains is traversed to
  991. * find the domain for a specific device
  992. */
  993. static struct dma_ops_domain *find_protection_domain(u16 devid)
  994. {
  995. struct dma_ops_domain *entry, *ret = NULL;
  996. unsigned long flags;
  997. if (list_empty(&iommu_pd_list))
  998. return NULL;
  999. spin_lock_irqsave(&iommu_pd_list_lock, flags);
  1000. list_for_each_entry(entry, &iommu_pd_list, list) {
  1001. if (entry->target_dev == devid) {
  1002. ret = entry;
  1003. break;
  1004. }
  1005. }
  1006. spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
  1007. return ret;
  1008. }
  1009. /*
  1010. * In the dma_ops path we only have the struct device. This function
  1011. * finds the corresponding IOMMU, the protection domain and the
  1012. * requestor id for a given device.
  1013. * If the device is not yet associated with a domain this is also done
  1014. * in this function.
  1015. */
  1016. static int get_device_resources(struct device *dev,
  1017. struct amd_iommu **iommu,
  1018. struct protection_domain **domain,
  1019. u16 *bdf)
  1020. {
  1021. struct dma_ops_domain *dma_dom;
  1022. struct pci_dev *pcidev;
  1023. u16 _bdf;
  1024. *iommu = NULL;
  1025. *domain = NULL;
  1026. *bdf = 0xffff;
  1027. if (dev->bus != &pci_bus_type)
  1028. return 0;
  1029. pcidev = to_pci_dev(dev);
  1030. _bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
  1031. /* device not translated by any IOMMU in the system? */
  1032. if (_bdf > amd_iommu_last_bdf)
  1033. return 0;
  1034. *bdf = amd_iommu_alias_table[_bdf];
  1035. *iommu = amd_iommu_rlookup_table[*bdf];
  1036. if (*iommu == NULL)
  1037. return 0;
  1038. *domain = domain_for_device(*bdf);
  1039. if (*domain == NULL) {
  1040. dma_dom = find_protection_domain(*bdf);
  1041. if (!dma_dom)
  1042. dma_dom = (*iommu)->default_dom;
  1043. *domain = &dma_dom->domain;
  1044. attach_device(*iommu, *domain, *bdf);
  1045. DUMP_printk("Using protection domain %d for device %s\n",
  1046. (*domain)->id, dev_name(dev));
  1047. }
  1048. if (domain_for_device(_bdf) == NULL)
  1049. attach_device(*iommu, *domain, _bdf);
  1050. return 1;
  1051. }
  1052. /*
  1053. * Returns the level-0 PTE for a given address, allocating the page table pages on the way down if they are not yet present
  1054. */
  1055. static u64* alloc_pte(struct protection_domain *dom,
  1056. unsigned long address, u64 **pte_page, gfp_t gfp)
  1057. {
  1058. u64 *pte, *page;
  1059. pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)];
  1060. if (!IOMMU_PTE_PRESENT(*pte)) {
  1061. page = (u64 *)get_zeroed_page(gfp);
  1062. if (!page)
  1063. return NULL;
  1064. *pte = IOMMU_L2_PDE(virt_to_phys(page));
  1065. }
  1066. pte = IOMMU_PTE_PAGE(*pte);
  1067. pte = &pte[IOMMU_PTE_L1_INDEX(address)];
  1068. if (!IOMMU_PTE_PRESENT(*pte)) {
  1069. page = (u64 *)get_zeroed_page(gfp);
  1070. if (!page)
  1071. return NULL;
  1072. *pte = IOMMU_L1_PDE(virt_to_phys(page));
  1073. }
  1074. pte = IOMMU_PTE_PAGE(*pte);
  1075. if (pte_page)
  1076. *pte_page = pte;
  1077. pte = &pte[IOMMU_PTE_L0_INDEX(address)];
  1078. return pte;
  1079. }
  1080. /*
  1081. * This function fetches the PTE for a given address in the aperture
  1082. */
  1083. static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
  1084. unsigned long address)
  1085. {
  1086. struct aperture_range *aperture;
  1087. u64 *pte, *pte_page;
  1088. aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
  1089. if (!aperture)
  1090. return NULL;
  1091. pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
  1092. if (!pte) {
  1093. pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC);
  1094. aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
  1095. } else
  1096. pte += IOMMU_PTE_L0_INDEX(address);
  1097. return pte;
  1098. }
  1099. /*
  1100. * This is the generic map function. It maps one 4kb page at paddr to
  1101. * the given address in the DMA address space for the domain.
  1102. */
  1103. static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
  1104. struct dma_ops_domain *dom,
  1105. unsigned long address,
  1106. phys_addr_t paddr,
  1107. int direction)
  1108. {
  1109. u64 *pte, __pte;
  1110. WARN_ON(address > dom->aperture_size);
  1111. paddr &= PAGE_MASK;
  1112. pte = dma_ops_get_pte(dom, address);
  1113. if (!pte)
  1114. return bad_dma_address;
  1115. __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
  1116. if (direction == DMA_TO_DEVICE)
  1117. __pte |= IOMMU_PTE_IR;
  1118. else if (direction == DMA_FROM_DEVICE)
  1119. __pte |= IOMMU_PTE_IW;
  1120. else if (direction == DMA_BIDIRECTIONAL)
  1121. __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
  1122. WARN_ON(*pte);
  1123. *pte = __pte;
  1124. return (dma_addr_t)address;
  1125. }
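/*
 * Note the permission encoding above: IOMMU_PTE_IR allows the device to
 * read (DMA_TO_DEVICE), IOMMU_PTE_IW allows it to write
 * (DMA_FROM_DEVICE), and DMA_BIDIRECTIONAL sets both bits.
 */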
  1126. /*
  1127. * The generic unmapping function for one page in the DMA address space.
  1128. */
  1129. static void dma_ops_domain_unmap(struct amd_iommu *iommu,
  1130. struct dma_ops_domain *dom,
  1131. unsigned long address)
  1132. {
  1133. struct aperture_range *aperture;
  1134. u64 *pte;
  1135. if (address >= dom->aperture_size)
  1136. return;
  1137. aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
  1138. if (!aperture)
  1139. return;
  1140. pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
  1141. if (!pte)
  1142. return;
  1143. pte += IOMMU_PTE_L0_INDEX(address);
  1144. WARN_ON(!*pte);
  1145. *pte = 0ULL;
  1146. }
  1147. /*
  1148. * This function contains common code for mapping of a physically
  1149. * contiguous memory region into DMA address space. It is used by all
  1150. * mapping functions provided with this IOMMU driver.
  1151. * Must be called with the domain lock held.
  1152. */
  1153. static dma_addr_t __map_single(struct device *dev,
  1154. struct amd_iommu *iommu,
  1155. struct dma_ops_domain *dma_dom,
  1156. phys_addr_t paddr,
  1157. size_t size,
  1158. int dir,
  1159. bool align,
  1160. u64 dma_mask)
  1161. {
  1162. dma_addr_t offset = paddr & ~PAGE_MASK;
  1163. dma_addr_t address, start, ret;
  1164. unsigned int pages;
  1165. unsigned long align_mask = 0;
  1166. int i;
  1167. pages = iommu_num_pages(paddr, size, PAGE_SIZE);
  1168. paddr &= PAGE_MASK;
  1169. INC_STATS_COUNTER(total_map_requests);
  1170. if (pages > 1)
  1171. INC_STATS_COUNTER(cross_page);
  1172. if (align)
  1173. align_mask = (1UL << get_order(size)) - 1;
  1174. retry:
  1175. address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
  1176. dma_mask);
  1177. if (unlikely(address == bad_dma_address)) {
  1178. /*
  1179. * setting next_address here will let the address
  1180. * allocator only scan the newly allocated range in the
  1181. * first run. This is a small optimization.
  1182. */
  1183. dma_dom->next_address = dma_dom->aperture_size;
  1184. if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC))
  1185. goto out;
  1186. /*
  1187. * aperture was successfully enlarged by 128 MB, try
  1188. * allocation again
  1189. */
  1190. goto retry;
  1191. }
  1192. start = address;
  1193. for (i = 0; i < pages; ++i) {
  1194. ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
  1195. if (ret == bad_dma_address)
  1196. goto out_unmap;
  1197. paddr += PAGE_SIZE;
  1198. start += PAGE_SIZE;
  1199. }
  1200. address += offset;
  1201. ADD_STATS_COUNTER(alloced_io_mem, size);
  1202. if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
  1203. iommu_flush_tlb(iommu, dma_dom->domain.id);
  1204. dma_dom->need_flush = false;
  1205. } else if (unlikely(iommu_has_npcache(iommu)))
  1206. iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
  1207. out:
  1208. return address;
  1209. out_unmap:
  1210. for (--i; i >= 0; --i) {
  1211. start -= PAGE_SIZE;
  1212. dma_ops_domain_unmap(iommu, dma_dom, start);
  1213. }
  1214. dma_ops_free_addresses(dma_dom, address, pages);
  1215. return bad_dma_address;
  1216. }
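/*
 * If the address allocator runs out of space, __map_single() enlarges
 * the aperture by one range via alloc_new_range() and retries; starting
 * the retry scan at the old aperture size keeps the allocator from
 * rescanning the part of the aperture that is already known to be full.
 */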
  1217. /*
  1218. * Does the reverse of the __map_single function. Must be called with
  1219. * the domain lock held too
  1220. */
  1221. static void __unmap_single(struct amd_iommu *iommu,
  1222. struct dma_ops_domain *dma_dom,
  1223. dma_addr_t dma_addr,
  1224. size_t size,
  1225. int dir)
  1226. {
  1227. dma_addr_t i, start;
  1228. unsigned int pages;
  1229. if ((dma_addr == bad_dma_address) ||
  1230. (dma_addr + size > dma_dom->aperture_size))
  1231. return;
  1232. pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
  1233. dma_addr &= PAGE_MASK;
  1234. start = dma_addr;
  1235. for (i = 0; i < pages; ++i) {
  1236. dma_ops_domain_unmap(iommu, dma_dom, start);
  1237. start += PAGE_SIZE;
  1238. }
  1239. SUB_STATS_COUNTER(alloced_io_mem, size);
  1240. dma_ops_free_addresses(dma_dom, dma_addr, pages);
  1241. if (amd_iommu_unmap_flush || dma_dom->need_flush) {
  1242. iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
  1243. dma_dom->need_flush = false;
  1244. }
  1245. }
  1246. /*
  1247. * The exported map_single function for dma_ops.
  1248. */
  1249. static dma_addr_t map_page(struct device *dev, struct page *page,
  1250. unsigned long offset, size_t size,
  1251. enum dma_data_direction dir,
  1252. struct dma_attrs *attrs)
  1253. {
  1254. unsigned long flags;
  1255. struct amd_iommu *iommu;
  1256. struct protection_domain *domain;
  1257. u16 devid;
  1258. dma_addr_t addr;
  1259. u64 dma_mask;
  1260. phys_addr_t paddr = page_to_phys(page) + offset;
  1261. INC_STATS_COUNTER(cnt_map_single);
  1262. if (!check_device(dev))
  1263. return bad_dma_address;
  1264. dma_mask = *dev->dma_mask;
  1265. get_device_resources(dev, &iommu, &domain, &devid);
  1266. if (iommu == NULL || domain == NULL)
  1267. /* device not handled by any AMD IOMMU */
  1268. return (dma_addr_t)paddr;
  1269. if (!dma_ops_domain(domain))
  1270. return bad_dma_address;
  1271. spin_lock_irqsave(&domain->lock, flags);
  1272. addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
  1273. dma_mask);
  1274. if (addr == bad_dma_address)
  1275. goto out;
  1276. iommu_completion_wait(iommu);
  1277. out:
  1278. spin_unlock_irqrestore(&domain->lock, flags);
  1279. return addr;
  1280. }
  1281. /*
  1282. * The exported unmap_single function for dma_ops.
  1283. */
  1284. static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
  1285. enum dma_data_direction dir, struct dma_attrs *attrs)
  1286. {
  1287. unsigned long flags;
  1288. struct amd_iommu *iommu;
  1289. struct protection_domain *domain;
  1290. u16 devid;
  1291. INC_STATS_COUNTER(cnt_unmap_single);
  1292. if (!check_device(dev) ||
  1293. !get_device_resources(dev, &iommu, &domain, &devid))
  1294. /* device not handled by any AMD IOMMU */
  1295. return;
  1296. if (!dma_ops_domain(domain))
  1297. return;
  1298. spin_lock_irqsave(&domain->lock, flags);
  1299. __unmap_single(iommu, domain->priv, dma_addr, size, dir);
  1300. iommu_completion_wait(iommu);
  1301. spin_unlock_irqrestore(&domain->lock, flags);
  1302. }
  1303. /*
  1304. * This is a special map_sg function which is used if we should map a
  1305. * device which is not handled by an AMD IOMMU in the system.
  1306. */
  1307. static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
  1308. int nelems, int dir)
  1309. {
  1310. struct scatterlist *s;
  1311. int i;
  1312. for_each_sg(sglist, s, nelems, i) {
  1313. s->dma_address = (dma_addr_t)sg_phys(s);
  1314. s->dma_length = s->length;
  1315. }
  1316. return nelems;
  1317. }
/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, enum dma_data_direction dir,
		  struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;
	u64 dma_mask;

	INC_STATS_COUNTER(cnt_map_sg);

	if (!check_device(dev))
		return 0;

	dma_mask = *dev->dma_mask;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		return map_sg_no_iommu(dev, sglist, nelems, dir);

	if (!dma_ops_domain(domain))
		return 0;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, iommu, domain->priv,
					      paddr, s->length, dir, false,
					      dma_mask);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
	}

	iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;

unmap:
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(iommu, domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}
/*
 * The exported unmap_sg function for dma_ops (unmaps scatter-gather
 * lists that were previously mapped with map_sg).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, enum dma_data_direction dir,
		     struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	struct scatterlist *s;
	u16 devid;
	int i;

	INC_STATS_COUNTER(cnt_unmap_sg);

	if (!check_device(dev) ||
	    !get_device_resources(dev, &iommu, &domain, &devid))
		return;

	if (!dma_ops_domain(domain))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(iommu, domain->priv, s->dma_address,
			       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}
/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long flags;
	void *virt_addr;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	phys_addr_t paddr;
	u64 dma_mask = dev->coherent_dma_mask;

	INC_STATS_COUNTER(cnt_alloc_coherent);

	if (!check_device(dev))
		return NULL;

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	flag |= __GFP_ZERO;
	virt_addr = (void *)__get_free_pages(flag, get_order(size));
	if (!virt_addr)
		return NULL;

	paddr = virt_to_phys(virt_addr);

	if (!iommu || !domain) {
		*dma_addr = (dma_addr_t)paddr;
		return virt_addr;
	}

	if (!dma_ops_domain(domain))
		goto out_free;

	if (!dma_mask)
		dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL, true, dma_mask);

	if (*dma_addr == bad_dma_address) {
		spin_unlock_irqrestore(&domain->lock, flags);
		goto out_free;
	}

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);

	return virt_addr;

out_free:
	free_pages((unsigned long)virt_addr, get_order(size));

	return NULL;
}
/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	INC_STATS_COUNTER(cnt_free_coherent);

	if (!check_device(dev))
		return;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		goto free_mem;

	if (!dma_ops_domain(domain))
		goto free_mem;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
	free_pages((unsigned long)virt_addr, get_order(size));
}
/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
	u16 bdf;
	struct pci_dev *pcidev;

	/* No device or no PCI device */
	if (!dev || dev->bus != &pci_bus_type)
		return 0;

	pcidev = to_pci_dev(dev);

	bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* Out of our scope? */
	if (bdf > amd_iommu_last_bdf)
		return 0;

	return 1;
}
/*
 * The function for pre-allocating protection domains.
 *
 * Once the driver core informs the DMA layer whenever a driver grabs a
 * device, we will no longer need to preallocate the protection domains.
 * For now we have to.
 */
static void prealloc_protection_domains(void)
{
	struct pci_dev *dev = NULL;
	struct dma_ops_domain *dma_dom;
	struct amd_iommu *iommu;
	u16 devid;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		devid = calc_devid(dev->bus->number, dev->devfn);
		if (devid > amd_iommu_last_bdf)
			continue;
		devid = amd_iommu_alias_table[devid];
		if (domain_for_device(devid))
			continue;
		iommu = amd_iommu_rlookup_table[devid];
		if (!iommu)
			continue;
		dma_dom = dma_ops_domain_alloc(iommu);
		if (!dma_dom)
			continue;
		init_unity_mappings_for_device(dma_dom, devid);
		dma_dom->target_dev = devid;

		list_add_tail(&dma_dom->list, &iommu_pd_list);
	}
}
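/*
 * The dma_ops implementation exported by this driver to the DMA layer.
 */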
static struct dma_map_ops amd_iommu_dma_ops = {
	.alloc_coherent = alloc_coherent,
	.free_coherent = free_coherent,
	.map_page = map_page,
	.unmap_page = unmap_page,
	.map_sg = map_sg,
	.unmap_sg = unmap_sg,
	.dma_supported = amd_iommu_dma_supported,
};
/*
 * The function which hooks the AMD IOMMU driver into dma_ops.
 */
int __init amd_iommu_init_dma_ops(void)
{
	struct amd_iommu *iommu;
	int ret;

	/*
	 * first allocate a default protection domain for every IOMMU we
	 * found in the system. Devices not assigned to any other
	 * protection domain will be assigned to the default one.
	 */
	for_each_iommu(iommu) {
		iommu->default_dom = dma_ops_domain_alloc(iommu);
		if (iommu->default_dom == NULL)
			return -ENOMEM;
		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
		ret = iommu_init_unity_mappings(iommu);
		if (ret)
			goto free_domains;
	}

	/*
	 * If device isolation is enabled, pre-allocate the protection
	 * domains for each device.
	 */
	if (amd_iommu_isolate)
		prealloc_protection_domains();

	iommu_detected = 1;
	force_iommu = 1;
	bad_dma_address = 0;
#ifdef CONFIG_GART_IOMMU
	gart_iommu_aperture_disabled = 1;
	gart_iommu_aperture = 0;
#endif
	/* Make the driver finally visible to the device drivers */
	dma_ops = &amd_iommu_dma_ops;

	register_iommu(&amd_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	amd_iommu_stats_init();

	return 0;

free_domains:
	for_each_iommu(iommu) {
		if (iommu->default_dom)
			dma_ops_domain_free(iommu->default_dom);
	}

	return ret;
}
/*****************************************************************************
 *
 * The following functions belong to the exported interface of AMD IOMMU
 *
 * This interface allows access to lower level functions of the IOMMU
 * like protection domain handling and assignment of devices to domains
 * which is not possible with the dma_ops interface.
 *
 *****************************************************************************/
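/*
 * Detach every device that is still bound to the given protection domain.
 * Called before a domain is destroyed through the IOMMU-API while devices
 * are still attached to it.
 */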
static void cleanup_domain(struct protection_domain *domain)
{
	unsigned long flags;
	u16 devid;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		if (amd_iommu_pd_table[devid] == domain)
			__detach_device(domain, devid);

	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
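/*
 * IOMMU-API callback: allocate and initialize a new protection domain
 * with a three-level page table and a freshly allocated domain id.
 */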
static int amd_iommu_domain_init(struct iommu_domain *dom)
{
	struct protection_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return -ENOMEM;

	spin_lock_init(&domain->lock);
	domain->mode = PAGE_MODE_3_LEVEL;
	domain->id = domain_id_alloc();
	if (!domain->id)
		goto out_free;
	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	if (!domain->pt_root)
		goto out_free;

	dom->priv = domain;

	return 0;

out_free:
	kfree(domain);

	return -ENOMEM;
}
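/*
 * IOMMU-API callback: tear down a protection domain. Devices still attached
 * are detached first, then the page table and domain id are freed.
 */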
static void amd_iommu_domain_destroy(struct iommu_domain *dom)
{
	struct protection_domain *domain = dom->priv;

	if (!domain)
		return;

	if (domain->dev_cnt > 0)
		cleanup_domain(domain);

	BUG_ON(domain->dev_cnt != 0);

	free_pagetable(domain);

	domain_id_free(domain->id);

	kfree(domain);

	dom->priv = NULL;
}
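/*
 * IOMMU-API callback: detach a PCI device from its protection domain and
 * invalidate its device table entry on the responsible IOMMU.
 */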
static void amd_iommu_detach_device(struct iommu_domain *dom,
				    struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid > 0)
		detach_device(domain, devid);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return;

	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_completion_wait(iommu);
}
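/*
 * IOMMU-API callback: attach a PCI device to a protection domain. If the
 * device is currently bound to another domain it is detached from that one
 * first.
 */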
static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct protection_domain *old_domain;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return -EINVAL;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid >= amd_iommu_last_bdf ||
	    devid != amd_iommu_alias_table[devid])
		return -EINVAL;

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return -EINVAL;

	old_domain = domain_for_device(devid);
	if (old_domain)
		detach_device(old_domain, devid);

	attach_device(iommu, domain, devid);

	iommu_completion_wait(iommu);

	return 0;
}
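/*
 * IOMMU-API callback: map a range of physical memory into the domain's page
 * table, one page at a time, with the requested read/write permissions.
 */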
static int amd_iommu_map_range(struct iommu_domain *dom,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int iommu_prot)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= IOMMU_PROT_IR;
	if (iommu_prot & IOMMU_WRITE)
		prot |= IOMMU_PROT_IW;

	iova  &= PAGE_MASK;
	paddr &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		ret = iommu_map_page(domain, iova, paddr, prot);
		if (ret)
			return ret;

		iova  += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return 0;
}
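/*
 * IOMMU-API callback: remove a range of pages from the domain's page table
 * and flush the domain's IO/TLB entries afterwards.
 */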
static void amd_iommu_unmap_range(struct iommu_domain *dom,
				  unsigned long iova, size_t size)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);

	iova &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		iommu_unmap_page(domain, iova);
		iova += PAGE_SIZE;
	}

	iommu_flush_domain(domain->id);
}
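/*
 * IOMMU-API callback: translate an IO virtual address back to the physical
 * address it is mapped to, or return 0 if no mapping exists.
 */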
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  unsigned long iova)
{
	struct protection_domain *domain = dom->priv;
	unsigned long offset = iova & ~PAGE_MASK;
	phys_addr_t paddr;
	u64 *pte;

	pte = fetch_pte(domain, iova);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	paddr  = *pte & IOMMU_PAGE_MASK;
	paddr |= offset;

	return paddr;
}
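/*
 * IOMMU-API callback: report domain capabilities. This driver advertises no
 * special capabilities, so it always returns 0.
 */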
static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
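/*
 * The iommu_ops implementation registered with the generic IOMMU-API via
 * register_iommu() in amd_iommu_init_dma_ops().
 */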
static struct iommu_ops amd_iommu_ops = {
	.domain_init = amd_iommu_domain_init,
	.domain_destroy = amd_iommu_domain_destroy,
	.attach_dev = amd_iommu_attach_device,
	.detach_dev = amd_iommu_detach_device,
	.map = amd_iommu_map_range,
	.unmap = amd_iommu_unmap_range,
	.iova_to_phys = amd_iommu_iova_to_phys,
	.domain_has_cap = amd_iommu_domain_has_cap,
};
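/*
 * Illustrative sketch only (not part of the original file): how a consumer
 * of the generic IOMMU-API, e.g. device-assignment code, would end up in the
 * amd_iommu_ops callbacks above. The caller-side variable names and error
 * handling here are assumptions, not taken from this driver.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	if (dom && !iommu_attach_device(dom, &pdev->dev)) {
 *		iommu_map_range(dom, iova, paddr, size,
 *				IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap_range(dom, iova, size);
 *		iommu_detach_device(dom, &pdev->dev);
 *	}
 *	iommu_domain_free(dom);
 */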