amd_iommu.c

/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define EXIT_LOOP_COUNT 10000000

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
 */
static struct protection_domain *pt_domain;

static struct iommu_ops amd_iommu_ops;

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e);
static struct dma_ops_domain *find_protection_domain(u16 devid);
static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address, int end_lvl,
		      u64 **pte_page, gfp_t gfp);
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages);
static void reset_iommu_command_buffer(struct amd_iommu *iommu);
static u64 *fetch_pte(struct protection_domain *domain,
		      unsigned long address, int map_size);
static void update_domain(struct protection_domain *domain);

#ifdef CONFIG_AMD_IOMMU_STATS

/*
 * Initialization code for statistics collection
 */

DECLARE_STATS_COUNTER(compl_wait);
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(cnt_unmap_single);
DECLARE_STATS_COUNTER(cnt_map_sg);
DECLARE_STATS_COUNTER(cnt_unmap_sg);
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
DECLARE_STATS_COUNTER(cnt_free_coherent);
DECLARE_STATS_COUNTER(cross_page);
DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);

static struct dentry *stats_dir;
static struct dentry *de_isolate;
static struct dentry *de_fflush;

static void amd_iommu_stats_add(struct __iommu_counter *cnt)
{
	if (stats_dir == NULL)
		return;

	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
				       &cnt->value);
}

static void amd_iommu_stats_init(void)
{
	stats_dir = debugfs_create_dir("amd-iommu", NULL);
	if (stats_dir == NULL)
		return;

	de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
					 (u32 *)&amd_iommu_isolate);

	de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
					(u32 *)&amd_iommu_unmap_flush);

	amd_iommu_stats_add(&compl_wait);
	amd_iommu_stats_add(&cnt_map_single);
	amd_iommu_stats_add(&cnt_unmap_single);
	amd_iommu_stats_add(&cnt_map_sg);
	amd_iommu_stats_add(&cnt_unmap_sg);
	amd_iommu_stats_add(&cnt_alloc_coherent);
	amd_iommu_stats_add(&cnt_free_coherent);
	amd_iommu_stats_add(&cross_page);
	amd_iommu_stats_add(&domain_flush_single);
	amd_iommu_stats_add(&domain_flush_all);
	amd_iommu_stats_add(&alloced_io_mem);
	amd_iommu_stats_add(&total_map_requests);
}

#endif

/* returns !0 if the IOMMU is caching non-present entries in its TLB */
static int iommu_has_npcache(struct amd_iommu *iommu)
{
	return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
}

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/
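
/*
 * Debug helper: dump the raw device table entry for @devid to the kernel
 * log, one 32-bit word per line (eight words per DTE).
 */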
static void dump_dte_entry(u16 devid)
{
	int i;

	for (i = 0; i < 8; ++i)
		pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
			amd_iommu_dev_table[devid].data[i]);
}
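
/*
 * Debug helper: dump the four 32-bit words of the command located at
 * @phys_addr, typically after an ILLEGAL_COMMAND_ERROR event.
 */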
static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	u32 *event = __evt;
	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	u64 address = (u64)(((u64)event[3]) << 32) | event[2];

	printk(KERN_ERR "AMD-Vi: Event logged [");

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		dump_dte_entry(devid);
		break;
	case EVENT_TYPE_IO_FAULT:
		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		reset_iommu_command_buffer(iommu);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}
}
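
/*
 * Drain the hardware event log: walk the ring buffer from head to tail,
 * print every entry and write the new head pointer back to the IOMMU.
 */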
static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
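
/*
 * Interrupt handler for the IOMMU event log interrupt; polls the event
 * log of every IOMMU in the system.
 */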
irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_poll_events(iommu);

	return IRQ_HANDLED;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command. Must be called with iommu->lock held.
 */
static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	u32 tail, head;
	u8 *target;

	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	target = iommu->cmd_buf + tail;
	memcpy_toio(target, cmd, sizeof(*cmd));
	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	if (tail == head)
		return -ENOMEM;
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	return 0;
}

/*
 * General queuing function for commands. Takes iommu->lock and calls
 * __iommu_queue_command().
 */
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	if (!ret)
		iommu->need_sync = true;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

/*
 * This function waits until an IOMMU has completed a completion
 * wait command
 */
static void __iommu_wait_for_completion(struct amd_iommu *iommu)
{
	int ready = 0;
	unsigned status = 0;
	unsigned long i = 0;

	INC_STATS_COUNTER(compl_wait);

	while (!ready && (i < EXIT_LOOP_COUNT)) {
		++i;
		/* wait for the bit to become one */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
	}

	/* set bit back to zero */
	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);

	if (unlikely(i == EXIT_LOOP_COUNT)) {
		spin_unlock(&iommu->lock);
		reset_iommu_command_buffer(iommu);
		spin_lock(&iommu->lock);
	}
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int __iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

	return __iommu_queue_command(iommu, &cmd);
}

/*
 * This function is called whenever we need to ensure that the IOMMU has
 * completed execution of all commands we sent. It sends a
 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
 * us about that by writing a value to a physical address we pass with
 * the command.
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	if (!iommu->need_sync)
		goto out;

	ret = __iommu_completion_wait(iommu);

	iommu->need_sync = false;

	if (ret)
		goto out;

	__iommu_wait_for_completion(iommu);

out:
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/*
 * Command send function for invalidating a device table entry
 */
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;
	int ret;

	BUG_ON(iommu == NULL);

	memset(&cmd, 0, sizeof(cmd));
	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
	cmd.data[0] = devid;

	ret = iommu_queue_command(iommu, &cmd);

	return ret;
}

static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
					  u16 domid, int pde, int s)
{
	memset(cmd, 0, sizeof(*cmd));
	address &= PAGE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	cmd->data[1] |= domid;
	cmd->data[2] = lower_32_bits(address);
	cmd->data[3] = upper_32_bits(address);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

/*
 * Generic command send function for invalidating TLB entries
 */
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
		u64 address, u16 domid, int pde, int s)
{
	struct iommu_cmd cmd;
	int ret;

	__iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s);

	ret = iommu_queue_command(iommu, &cmd);

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
		u64 address, size_t size)
{
	int s = 0;
	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);

	address &= PAGE_MASK;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);

	return 0;
}

/* Flush the whole IO/TLB for a given protection domain */
static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
{
	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;

	INC_STATS_COUNTER(domain_flush_single);

	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
{
	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;

	INC_STATS_COUNTER(domain_flush_single);

	iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
}

/*
 * This function flushes one domain on one IOMMU
 */
static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
{
	struct iommu_cmd cmd;
	unsigned long flags;

	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
				      domid, 1, 1);

	spin_lock_irqsave(&iommu->lock, flags);
	__iommu_queue_command(iommu, &cmd);
	__iommu_completion_wait(iommu);
	__iommu_wait_for_completion(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
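
/* Flush the TLB entries of all allocated protection domains on one IOMMU */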
static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
{
	int i;

	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
			continue;
		flush_domain_on_iommu(iommu, i);
	}
}

/*
 * This function is used to flush the IO/TLB for a given protection domain
 * on every IOMMU in the system
 */
static void iommu_flush_domain(u16 domid)
{
	struct amd_iommu *iommu;

	INC_STATS_COUNTER(domain_flush_all);

	for_each_iommu(iommu)
		flush_domain_on_iommu(iommu, domid);
}

void amd_iommu_flush_all_domains(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		flush_all_domains_on_iommu(iommu);
}

static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
{
	int i;

	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
		if (iommu != amd_iommu_rlookup_table[i])
			continue;

		iommu_queue_inv_dev_entry(iommu, i);
		iommu_completion_wait(iommu);
	}
}
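
/*
 * Invalidate the device table entries of all devices that are attached to
 * the given protection domain (a NULL domain is passed by
 * amd_iommu_flush_all_devices()).
 */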
static void flush_devices_by_domain(struct protection_domain *domain)
{
	struct amd_iommu *iommu;
	int i;

	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
		if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
		    (amd_iommu_pd_table[i] != domain))
			continue;

		iommu = amd_iommu_rlookup_table[i];
		if (!iommu)
			continue;

		iommu_queue_inv_dev_entry(iommu, i);
		iommu_completion_wait(iommu);
	}
}

static void reset_iommu_command_buffer(struct amd_iommu *iommu)
{
	pr_err("AMD-Vi: Resetting IOMMU command buffer\n");

	if (iommu->reset_in_progress)
		panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");

	iommu->reset_in_progress = true;

	amd_iommu_reset_cmd_buffer(iommu);
	flush_all_devices_for_iommu(iommu);
	flush_all_domains_on_iommu(iommu);

	iommu->reset_in_progress = false;
}

void amd_iommu_flush_all_devices(void)
{
	flush_devices_by_domain(NULL);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * Generic mapping functions. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map_page(struct protection_domain *dom,
			  unsigned long bus_addr,
			  unsigned long phys_addr,
			  int prot,
			  int map_size)
{
	u64 __pte, *pte;

	bus_addr  = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);

	BUG_ON(!PM_ALIGNED(map_size, bus_addr));
	BUG_ON(!PM_ALIGNED(map_size, phys_addr));

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);

	if (IOMMU_PTE_PRESENT(*pte))
		return -EBUSY;

	__pte = phys_addr | IOMMU_PTE_P;
	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	*pte = __pte;

	update_domain(dom);

	return 0;
}

static void iommu_unmap_page(struct protection_domain *dom,
			     unsigned long bus_addr, int map_size)
{
	u64 *pte = fetch_pte(dom, bus_addr, map_size);

	if (pte)
		*pte = 0;
}

/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf, i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}

/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default domain DMA of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e)
{
	u64 addr;
	int ret;

	for (addr = e->address_start; addr < e->address_end;
	     addr += PAGE_SIZE) {
		ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
				     PM_MAP_4k);
		if (ret)
			return ret;
		/*
		 * if unity mapping is in aperture range mark the page
		 * as allocated in the aperture
		 */
		if (addr < dma_dom->aperture_size)
			__set_bit(addr >> PAGE_SHIFT,
				  dma_dom->aperture[0]->bitmap);
	}

	return 0;
}

/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
					  u16 devid)
{
	struct unity_map_entry *e;
	int ret;

	list_for_each_entry(e, &amd_iommu_unity_map, list) {
		if (!(devid >= e->devid_start && devid <= e->devid_end))
			continue;
		ret = dma_ops_unity_map(dma_dom, e);
		if (ret)
			return ret;
	}

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/

/*
 * The address allocator core functions.
 *
 * called with domain->lock held
 */

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct protection_domain *domain,
		      unsigned long address, int map_size)
{
	int level;
	u64 *pte;

	level = domain->mode - 1;
	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

	while (level > map_size) {
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(*pte);
		pte = &pte[PM_LEVEL_INDEX(level, address)];

		if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
			pte = NULL;
			break;
		}
	}

	return pte;
}

/*
 * This function is used to add a new aperture range to an existing
 * aperture in case of dma_ops domain allocation or address allocation
 * failure.
 */
static int alloc_new_range(struct amd_iommu *iommu,
			   struct dma_ops_domain *dma_dom,
			   bool populate, gfp_t gfp)
{
	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
	int i;

#ifdef CONFIG_IOMMU_STRESS
	populate = false;
#endif

	if (index >= APERTURE_MAX_RANGES)
		return -ENOMEM;

	dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
	if (!dma_dom->aperture[index])
		return -ENOMEM;

	dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
	if (!dma_dom->aperture[index]->bitmap)
		goto out_free;

	dma_dom->aperture[index]->offset = dma_dom->aperture_size;

	if (populate) {
		unsigned long address = dma_dom->aperture_size;
		int i, num_ptes = APERTURE_RANGE_PAGES / 512;
		u64 *pte, *pte_page;

		for (i = 0; i < num_ptes; ++i) {
			pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
					&pte_page, gfp);
			if (!pte)
				goto out_free;

			dma_dom->aperture[index]->pte_pages[i] = pte_page;

			address += APERTURE_RANGE_SIZE / 64;
		}
	}

	dma_dom->aperture_size += APERTURE_RANGE_SIZE;

	/* Initialize the exclusion range if necessary */
	if (iommu->exclusion_start &&
	    iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
	    iommu->exclusion_start < dma_dom->aperture_size) {
		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
		int pages = iommu_num_pages(iommu->exclusion_start,
					    iommu->exclusion_length,
					    PAGE_SIZE);
		dma_ops_reserve_addresses(dma_dom, startpage, pages);
	}

	/*
	 * Check for areas already mapped as present in the new aperture
	 * range and mark those pages as reserved in the allocator. Such
	 * mappings may already exist as a result of requested unity
	 * mappings for devices.
	 */
	for (i = dma_dom->aperture[index]->offset;
	     i < dma_dom->aperture_size;
	     i += PAGE_SIZE) {
		u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k);
		if (!pte || !IOMMU_PTE_PRESENT(*pte))
			continue;

		dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
	}

	update_domain(&dma_dom->domain);

	return 0;

out_free:
	update_domain(&dma_dom->domain);

	free_page((unsigned long)dma_dom->aperture[index]->bitmap);

	kfree(dma_dom->aperture[index]);
	dma_dom->aperture[index] = NULL;

	return -ENOMEM;
}
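
/*
 * Scan the aperture ranges beginning at @start for a free block of @pages
 * pages below @dma_mask; returns the allocated address or -1 if nothing
 * suitable was found.
 */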
static unsigned long dma_ops_area_alloc(struct device *dev,
					struct dma_ops_domain *dom,
					unsigned int pages,
					unsigned long align_mask,
					u64 dma_mask,
					unsigned long start)
{
	unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
	int i = start >> APERTURE_RANGE_SHIFT;
	unsigned long boundary_size;
	unsigned long address = -1;
	unsigned long limit;

	next_bit >>= PAGE_SHIFT;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			PAGE_SIZE) >> PAGE_SHIFT;

	for (;i < max_index; ++i) {
		unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;

		if (dom->aperture[i]->offset >= dma_mask)
			break;

		limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
					       dma_mask >> PAGE_SHIFT);

		address = iommu_area_alloc(dom->aperture[i]->bitmap,
					   limit, next_bit, pages, 0,
					   boundary_size, align_mask);

		if (address != -1) {
			address = dom->aperture[i]->offset +
				  (address << PAGE_SHIFT);
			dom->next_address = address + (pages << PAGE_SHIFT);
			break;
		}

		next_bit = 0;
	}

	return address;
}

static unsigned long dma_ops_alloc_addresses(struct device *dev,
					     struct dma_ops_domain *dom,
					     unsigned int pages,
					     unsigned long align_mask,
					     u64 dma_mask)
{
	unsigned long address;

#ifdef CONFIG_IOMMU_STRESS
	dom->next_address = 0;
	dom->need_flush = true;
#endif

	address = dma_ops_area_alloc(dev, dom, pages, align_mask,
				     dma_mask, dom->next_address);

	if (address == -1) {
		dom->next_address = 0;
		address = dma_ops_area_alloc(dev, dom, pages, align_mask,
					     dma_mask, 0);
		dom->need_flush = true;
	}

	if (unlikely(address == -1))
		address = bad_dma_address;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

	return address;
}

/*
 * The address free function.
 *
 * called with domain->lock held
 */
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
				   unsigned long address,
				   unsigned int pages)
{
	unsigned i = address >> APERTURE_RANGE_SHIFT;
	struct aperture_range *range = dom->aperture[i];

	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);

#ifdef CONFIG_IOMMU_STRESS
	if (i < 4)
		return;
#endif

	if (address >= dom->next_address)
		dom->need_flush = true;

	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;

	iommu_area_free(range->bitmap, address, pages);
}

/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/
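
/*
 * Allocate a free protection domain id from the global bitmap; returns 0
 * if no id is available.
 */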
static u16 domain_id_alloc(void)
{
	unsigned long flags;
	int id;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return id;
}

static void domain_id_free(int id)
{
	unsigned long flags;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;

	if (start_page + pages > last_page)
		pages = last_page - start_page;

	for (i = start_page; i < start_page + pages; ++i) {
		int index = i / APERTURE_RANGE_PAGES;
		int page  = i % APERTURE_RANGE_PAGES;
		__set_bit(page, dom->aperture[index]->bitmap);
	}
}
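
/*
 * Free all pages of a three level IO page table, including the page table
 * root, and clear pt_root.
 */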
static void free_pagetable(struct protection_domain *domain)
{
	int i, j;
	u64 *p1, *p2, *p3;

	p1 = domain->pt_root;

	if (!p1)
		return;

	for (i = 0; i < 512; ++i) {
		if (!IOMMU_PTE_PRESENT(p1[i]))
			continue;

		p2 = IOMMU_PTE_PAGE(p1[i]);
		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;
			p3 = IOMMU_PTE_PAGE(p2[j]);
			free_page((unsigned long)p3);
		}

		free_page((unsigned long)p2);
	}

	free_page((unsigned long)p1);

	domain->pt_root = NULL;
}

/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
	int i;

	if (!dom)
		return;

	free_pagetable(&dom->domain);

	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
		if (!dom->aperture[i])
			continue;
		free_page((unsigned long)dom->aperture[i]->bitmap);
		kfree(dom->aperture[i]);
	}

	kfree(dom);
}

/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
{
	struct dma_ops_domain *dma_dom;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	spin_lock_init(&dma_dom->domain.lock);

	dma_dom->domain.id = domain_id_alloc();
	if (dma_dom->domain.id == 0)
		goto free_dma_dom;
	dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	dma_dom->domain.flags = PD_DMA_OPS_MASK;
	dma_dom->domain.priv = dma_dom;
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;

	dma_dom->need_flush = false;
	dma_dom->target_dev = 0xffff;

	if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
		goto free_dma_dom;

	/*
	 * mark the first page as allocated so we never return 0 as
	 * a valid dma-address, so we can use 0 as the error value
	 */
	dma_dom->aperture[0]->bitmap[0] = 1;
	dma_dom->next_address = 0;

	return dma_dom;

free_dma_dom:
	dma_ops_domain_free(dma_dom);

	return NULL;
}

/*
 * little helper function to check whether a given protection domain is a
 * dma_ops domain
 */
static bool dma_ops_domain(struct protection_domain *domain)
{
	return domain->flags & PD_DMA_OPS_MASK;
}

/*
 * Find out the protection domain structure for a given PCI device. This
 * will give us the pointer to the page table root for example.
 */
static struct protection_domain *domain_for_device(u16 devid)
{
	struct protection_domain *dom;
	unsigned long flags;

	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
	dom = amd_iommu_pd_table[devid];
	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return dom;
}
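
/*
 * Write the page table root, paging mode and domain id of @domain into the
 * device table entry for @devid and record the association in
 * amd_iommu_pd_table.
 */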
static void set_dte_entry(u16 devid, struct protection_domain *domain)
{
	u64 pte_root = virt_to_phys(domain->pt_root);

	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

	amd_iommu_dev_table[devid].data[2] = domain->id;
	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);

	amd_iommu_pd_table[devid] = domain;
}

/*
 * If a device is not yet associated with a domain, this function
 * assigns it to the domain and makes the assignment visible to the
 * hardware
 */
static void __attach_device(struct amd_iommu *iommu,
			    struct protection_domain *domain,
			    u16 devid)
{
	/* lock domain */
	spin_lock(&domain->lock);

	/* update DTE entry */
	set_dte_entry(devid, domain);

	domain->dev_cnt += 1;

	/* ready */
	spin_unlock(&domain->lock);
}

/*
 * If a device is not yet associated with a domain, this function
 * assigns it to the domain and makes the assignment visible to the
 * hardware
 */
static void attach_device(struct amd_iommu *iommu,
			  struct protection_domain *domain,
			  u16 devid)
{
	unsigned long flags;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	__attach_device(iommu, domain, devid);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	/*
	 * We might boot into a crash-kernel here. The crashed kernel
	 * left the caches in the IOMMU dirty. So we have to flush
	 * here to evict all dirty stuff.
	 */
	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_flush_tlb_pde(iommu, domain->id);
}

/*
 * Removes a device from a protection domain (unlocked)
 */
static void __detach_device(struct protection_domain *domain, u16 devid)
{
	/* lock domain */
	spin_lock(&domain->lock);

	/* remove domain from the lookup table */
	amd_iommu_pd_table[devid] = NULL;

	/* remove entry from the device table seen by the hardware */
	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
	amd_iommu_dev_table[devid].data[1] = 0;
	amd_iommu_dev_table[devid].data[2] = 0;

	amd_iommu_apply_erratum_63(devid);

	/* decrease reference counter */
	domain->dev_cnt -= 1;

	/* ready */
	spin_unlock(&domain->lock);

	/*
	 * If we run in passthrough mode the device must be assigned to the
	 * passthrough domain if it is detached from any other domain
	 */
	if (iommu_pass_through) {
		struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
		__attach_device(iommu, pt_domain, devid);
	}
}

/*
 * Removes a device from a protection domain (with devtable_lock held)
 */
static void detach_device(struct protection_domain *domain, u16 devid)
{
	unsigned long flags;

	/* lock device table */
	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	__detach_device(domain, devid);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
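
/*
 * Bus notifier callback: tears down the domain association when a driver
 * is unbound and preallocates a dma_ops domain when a new device is added.
 */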
static int device_change_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
	struct protection_domain *domain;
	struct dma_ops_domain *dma_domain;
	struct amd_iommu *iommu;
	unsigned long flags;

	if (devid > amd_iommu_last_bdf)
		goto out;

	devid = amd_iommu_alias_table[devid];

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		goto out;

	domain = domain_for_device(devid);

	if (domain && !dma_ops_domain(domain))
		WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
			  "to a non-dma-ops domain\n", dev_name(dev));

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		if (!domain)
			goto out;
		if (iommu_pass_through)
			break;
		detach_device(domain, devid);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		/* allocate a protection domain if a device is added */
		dma_domain = find_protection_domain(devid);
		if (dma_domain)
			goto out;
		dma_domain = dma_ops_domain_alloc(iommu);
		if (!dma_domain)
			goto out;
		dma_domain->target_dev = devid;

		spin_lock_irqsave(&iommu_pd_list_lock, flags);
		list_add_tail(&dma_domain->list, &iommu_pd_list);
		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

		break;
	default:
		goto out;
	}

	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_completion_wait(iommu);

out:
	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_change_notifier,
};

/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
 *
 *****************************************************************************/

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	if (!dev || !dev->dma_mask)
		return false;

	return true;
}

/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
	struct dma_ops_domain *entry, *ret = NULL;
	unsigned long flags;

	if (list_empty(&iommu_pd_list))
		return NULL;

	spin_lock_irqsave(&iommu_pd_list_lock, flags);

	list_for_each_entry(entry, &iommu_pd_list, list) {
		if (entry->target_dev == devid) {
			ret = entry;
			break;
		}
	}

	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

	return ret;
}

/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static int get_device_resources(struct device *dev,
				struct amd_iommu **iommu,
				struct protection_domain **domain,
				u16 *bdf)
{
	struct dma_ops_domain *dma_dom;
	struct pci_dev *pcidev;
	u16 _bdf;

	*iommu = NULL;
	*domain = NULL;
	*bdf = 0xffff;

	if (dev->bus != &pci_bus_type)
		return 0;

	pcidev = to_pci_dev(dev);
	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* device not translated by any IOMMU in the system? */
	if (_bdf > amd_iommu_last_bdf)
		return 0;

	*bdf = amd_iommu_alias_table[_bdf];

	*iommu = amd_iommu_rlookup_table[*bdf];
	if (*iommu == NULL)
		return 0;
	*domain = domain_for_device(*bdf);
	if (*domain == NULL) {
		dma_dom = find_protection_domain(*bdf);
		if (!dma_dom)
			dma_dom = (*iommu)->default_dom;
		*domain = &dma_dom->domain;
		attach_device(*iommu, *domain, *bdf);
		DUMP_printk("Using protection domain %d for device %s\n",
			    (*domain)->id, dev_name(dev));
	}

	if (domain_for_device(_bdf) == NULL)
		attach_device(*iommu, *domain, _bdf);

	return 1;
}
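
/*
 * Re-write the device table entries of all devices that are bound to
 * @domain, e.g. after the page table root or paging mode has changed.
 */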
static void update_device_table(struct protection_domain *domain)
{
	unsigned long flags;
	int i;

	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
		if (amd_iommu_pd_table[i] != domain)
			continue;
		write_lock_irqsave(&amd_iommu_devtable_lock, flags);
		set_dte_entry(i, domain);
		write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
	}
}

static void update_domain(struct protection_domain *domain)
{
	if (!domain->updated)
		return;

	update_device_table(domain);
	flush_devices_by_domain(domain);
	iommu_flush_domain(domain->id);

	domain->updated = false;
}

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   gfp_t gfp)
{
	u64 *pte;

	if (domain->mode == PAGE_MODE_6_LEVEL)
		/* address space already 64 bit large */
		return false;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		return false;

	*pte             = PM_LEVEL_PDE(domain->mode,
					virt_to_phys(domain->pt_root));
	domain->pt_root  = pte;
	domain->mode    += 1;
	domain->updated  = true;

	return true;
}
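
/*
 * Walk the IO page table for @address down to @end_lvl, allocating missing
 * page table pages on the way, and return a pointer to the PTE. The
 * address space is grown first if @address lies outside of it.
 */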
static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      int end_lvl,
		      u64 **pte_page,
		      gfp_t gfp)
{
	u64 *pte, *page;
	int level;

	while (address > PM_LEVEL_SIZE(domain->mode))
		increase_address_space(domain, gfp);

	level = domain->mode - 1;
	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

	while (level > end_lvl) {
		if (!IOMMU_PTE_PRESENT(*pte)) {
			page = (u64 *)get_zeroed_page(gfp);
			if (!page)
				return NULL;
			*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
		}

		level -= 1;

		pte = IOMMU_PTE_PAGE(*pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}

/*
 * This function fetches the PTE for a given address in the aperture
 */
static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
			    unsigned long address)
{
	struct aperture_range *aperture;
	u64 *pte, *pte_page;

	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
	if (!aperture)
		return NULL;

	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
	if (!pte) {
		pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
				GFP_ATOMIC);
		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
	} else
		pte += PM_LEVEL_INDEX(0, address);

	update_domain(&dom->domain);

	return pte;
}

/*
 * This is the generic map function. It maps one 4kb page at paddr to
 * the given address in the DMA address space for the domain.
 */
static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
				     struct dma_ops_domain *dom,
				     unsigned long address,
				     phys_addr_t paddr,
				     int direction)
{
	u64 *pte, __pte;

	WARN_ON(address > dom->aperture_size);

	paddr &= PAGE_MASK;

	pte = dma_ops_get_pte(dom, address);
	if (!pte)
		return bad_dma_address;

	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (direction == DMA_TO_DEVICE)
		__pte |= IOMMU_PTE_IR;
	else if (direction == DMA_FROM_DEVICE)
		__pte |= IOMMU_PTE_IW;
	else if (direction == DMA_BIDIRECTIONAL)
		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

	WARN_ON(*pte);

	*pte = __pte;

	return (dma_addr_t)address;
}

/*
 * The generic unmapping function for one page in the DMA address space.
 */
static void dma_ops_domain_unmap(struct amd_iommu *iommu,
				 struct dma_ops_domain *dom,
				 unsigned long address)
{
	struct aperture_range *aperture;
	u64 *pte;

	if (address >= dom->aperture_size)
		return;

	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
	if (!aperture)
		return;

	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
	if (!pte)
		return;

	pte += PM_LEVEL_INDEX(0, address);

	WARN_ON(!*pte);

	*pte = 0ULL;
}

/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided with this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
			       struct amd_iommu *iommu,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       int dir,
			       bool align,
			       u64 dma_mask)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start, ret;
	unsigned int pages;
	unsigned long align_mask = 0;
	int i;

	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
	paddr &= PAGE_MASK;

	INC_STATS_COUNTER(total_map_requests);

	if (pages > 1)
		INC_STATS_COUNTER(cross_page);

	if (align)
		align_mask = (1UL << get_order(size)) - 1;

retry:
	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
					  dma_mask);
	if (unlikely(address == bad_dma_address)) {
		/*
		 * setting next_address here will let the address
		 * allocator only scan the newly allocated range in the
		 * first run. This is a small optimization.
		 */
		dma_dom->next_address = dma_dom->aperture_size;

		if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC))
			goto out;

		/*
		 * aperture was successfully enlarged by 128 MB, try
		 * allocation again
		 */
		goto retry;
	}

	start = address;
	for (i = 0; i < pages; ++i) {
		ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
		if (ret == bad_dma_address)
			goto out_unmap;

		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	address += offset;

	ADD_STATS_COUNTER(alloced_io_mem, size);

	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
		iommu_flush_tlb(iommu, dma_dom->domain.id);
		dma_dom->need_flush = false;
	} else if (unlikely(iommu_has_npcache(iommu)))
		iommu_flush_pages(iommu, dma_dom->domain.id, address, size);

out:
	return address;

out_unmap:
	for (--i; i >= 0; --i) {
		start -= PAGE_SIZE;
		dma_ops_domain_unmap(iommu, dma_dom, start);
	}

	dma_ops_free_addresses(dma_dom, address, pages);

	return bad_dma_address;
}
/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too.
 */
static void __unmap_single(struct amd_iommu *iommu,
			   struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t i, start;
	unsigned int pages;

	if ((dma_addr == bad_dma_address) ||
	    (dma_addr + size > dma_dom->aperture_size))
		return;

	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		dma_ops_domain_unmap(iommu, dma_dom, start);
		start += PAGE_SIZE;
	}

	SUB_STATS_COUNTER(alloced_io_mem, size);

	dma_ops_free_addresses(dma_dom, dma_addr, pages);

	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
		dma_dom->need_flush = false;
	}
}
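/*
 * Note on TLB flushing in __map_single()/__unmap_single(): when the aperture
 * has just been grown (dma_dom->need_flush) and unmap-time flushing is
 * disabled, the whole domain TLB is flushed once; otherwise a newly mapped
 * range is flushed only on IOMMUs that also cache not-present table entries.
 * Unmapped ranges are flushed immediately when amd_iommu_unmap_flush is set
 * or a flush is still pending for the domain.
 */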
/*
 * The exported map_page function for dma_ops.
 */
static dma_addr_t map_page(struct device *dev, struct page *page,
			   unsigned long offset, size_t size,
			   enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	dma_addr_t addr;
	u64 dma_mask;
	phys_addr_t paddr = page_to_phys(page) + offset;

	INC_STATS_COUNTER(cnt_map_single);

	if (!check_device(dev))
		return bad_dma_address;

	dma_mask = *dev->dma_mask;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (iommu == NULL || domain == NULL)
		/* device not handled by any AMD IOMMU */
		return (dma_addr_t)paddr;

	if (!dma_ops_domain(domain))
		return bad_dma_address;

	spin_lock_irqsave(&domain->lock, flags);

	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
			    dma_mask);
	if (addr == bad_dma_address)
		goto out;

	iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}
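/*
 * Illustrative sketch, not part of the original driver: once
 * amd_iommu_dma_ops is installed as dma_ops (see amd_iommu_init_dma_ops()
 * below), a device driver reaches map_page()/unmap_page() through the
 * generic DMA API.  The helper name is hypothetical; dma_map_page(),
 * dma_mapping_error() and dma_unmap_page() are the standard DMA-API calls.
 */
static int __maybe_unused example_map_one_page(struct device *dev,
					       struct page *page)
{
	dma_addr_t handle;

	/* dispatches to map_page() via dma_ops */
	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... the device would be programmed with 'handle' here ... */

	/* dispatches to unmap_page() via dma_ops */
	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);

	return 0;
}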
/*
 * The exported unmap_page function for dma_ops.
 */
static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	INC_STATS_COUNTER(cnt_unmap_single);

	if (!check_device(dev) ||
	    !get_device_resources(dev, &iommu, &domain, &devid))
		/* device not handled by any AMD IOMMU */
		return;

	if (!dma_ops_domain(domain))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, dir);

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}
/*
 * This is a special map_sg function which is used if we have to map a
 * device which is not handled by an AMD IOMMU in the system.
 */
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
			   int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
		s->dma_length  = s->length;
	}

	return nelems;
}
/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, enum dma_data_direction dir,
		  struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;
	u64 dma_mask;

	INC_STATS_COUNTER(cnt_map_sg);

	if (!check_device(dev))
		return 0;

	dma_mask = *dev->dma_mask;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		return map_sg_no_iommu(dev, sglist, nelems, dir);

	if (!dma_ops_domain(domain))
		return 0;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, iommu, domain->priv,
					      paddr, s->length, dir, false,
					      dma_mask);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
	}

	iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
unmap:
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(iommu, domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}
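/*
 * Illustrative sketch, not part of the original driver: scatter-gather
 * mappings from a driver go through dma_map_sg()/dma_unmap_sg(), which
 * dispatch to map_sg()/unmap_sg() above.  The helper name is hypothetical;
 * sg_dma_address()/sg_dma_len() are the standard accessors for mapped
 * entries.
 */
static void __maybe_unused example_map_sg(struct device *dev,
					  struct scatterlist *sglist,
					  int nelems)
{
	struct scatterlist *s;
	int mapped, i;

	mapped = dma_map_sg(dev, sglist, nelems, DMA_FROM_DEVICE);
	if (!mapped)
		return;

	for_each_sg(sglist, s, mapped, i) {
		dma_addr_t addr = sg_dma_address(s);
		unsigned int len = sg_dma_len(s);

		/* one hardware descriptor would be programmed with addr/len */
		(void)addr;
		(void)len;
	}

	dma_unmap_sg(dev, sglist, nelems, DMA_FROM_DEVICE);
}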
/*
 * The exported unmap_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, enum dma_data_direction dir,
		     struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	struct scatterlist *s;
	u16 devid;
	int i;

	INC_STATS_COUNTER(cnt_unmap_sg);

	if (!check_device(dev) ||
	    !get_device_resources(dev, &iommu, &domain, &devid))
		return;

	if (!dma_ops_domain(domain))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(iommu, domain->priv, s->dma_address,
			       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}
/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long flags;
	void *virt_addr;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	phys_addr_t paddr;
	u64 dma_mask = dev->coherent_dma_mask;

	INC_STATS_COUNTER(cnt_alloc_coherent);

	if (!check_device(dev))
		return NULL;

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	flag |= __GFP_ZERO;
	virt_addr = (void *)__get_free_pages(flag, get_order(size));
	if (!virt_addr)
		return NULL;

	paddr = virt_to_phys(virt_addr);

	if (!iommu || !domain) {
		*dma_addr = (dma_addr_t)paddr;
		return virt_addr;
	}

	if (!dma_ops_domain(domain))
		goto out_free;

	if (!dma_mask)
		dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL, true, dma_mask);

	if (*dma_addr == bad_dma_address) {
		spin_unlock_irqrestore(&domain->lock, flags);
		goto out_free;
	}

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);

	return virt_addr;

out_free:

	free_pages((unsigned long)virt_addr, get_order(size));

	return NULL;
}
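/*
 * Illustrative sketch, not part of the original driver: coherent buffers
 * come in through dma_alloc_coherent()/dma_free_coherent(), which end up in
 * alloc_coherent()/free_coherent() above.  The helper name and the
 * PAGE_SIZE buffer size are arbitrary example choices.
 */
static void __maybe_unused example_coherent_buffer(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return;

	/* cpu_addr is usable by the CPU, dma_handle by the device */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
}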
/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	INC_STATS_COUNTER(cnt_free_coherent);

	if (!check_device(dev))
		return;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		goto free_mem;

	if (!dma_ops_domain(domain))
		goto free_mem;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
	free_pages((unsigned long)virt_addr, get_order(size));
}
/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
	u16 bdf;
	struct pci_dev *pcidev;

	/* No device or no PCI device */
	if (!dev || dev->bus != &pci_bus_type)
		return 0;

	pcidev = to_pci_dev(dev);

	bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* Out of our scope? */
	if (bdf > amd_iommu_last_bdf)
		return 0;

	return 1;
}
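/*
 * Note: the check above only verifies that the device falls into the range
 * of device IDs covered by the IOMMUs; the mask itself is not inspected
 * here.  __map_single() passes the device's dma_mask to the address
 * allocator instead, so addresses are chosen to fit the mask at map time.
 */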
/*
 * The function for pre-allocating protection domains.
 *
 * Once the driver core informs the DMA layer whenever a driver grabs a
 * device, we will no longer need to preallocate the protection domains.
 * For now we have to.
 */
static void prealloc_protection_domains(void)
{
	struct pci_dev *dev = NULL;
	struct dma_ops_domain *dma_dom;
	struct amd_iommu *iommu;
	u16 devid;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		devid = calc_devid(dev->bus->number, dev->devfn);
		if (devid > amd_iommu_last_bdf)
			continue;
		devid = amd_iommu_alias_table[devid];
		if (domain_for_device(devid))
			continue;
		iommu = amd_iommu_rlookup_table[devid];
		if (!iommu)
			continue;
		dma_dom = dma_ops_domain_alloc(iommu);
		if (!dma_dom)
			continue;
		init_unity_mappings_for_device(dma_dom, devid);
		dma_dom->target_dev = devid;

		list_add_tail(&dma_dom->list, &iommu_pd_list);
	}
}
static struct dma_map_ops amd_iommu_dma_ops = {
	.alloc_coherent = alloc_coherent,
	.free_coherent = free_coherent,
	.map_page = map_page,
	.unmap_page = unmap_page,
	.map_sg = map_sg,
	.unmap_sg = unmap_sg,
	.dma_supported = amd_iommu_dma_supported,
};
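/*
 * None of these callbacks is called directly; the generic DMA API dispatches
 * to them once the global dma_ops pointer is set to this structure in
 * amd_iommu_init_dma_ops() below.
 */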
/*
 * The function which glues the AMD IOMMU driver into dma_ops.
 */
int __init amd_iommu_init_dma_ops(void)
{
	struct amd_iommu *iommu;
	int ret;

	/*
	 * First allocate a default protection domain for every IOMMU we
	 * found in the system. Devices not assigned to any other
	 * protection domain will be assigned to the default one.
	 */
	for_each_iommu(iommu) {
		iommu->default_dom = dma_ops_domain_alloc(iommu);
		if (iommu->default_dom == NULL)
			return -ENOMEM;
		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
		ret = iommu_init_unity_mappings(iommu);
		if (ret)
			goto free_domains;
	}

	/*
	 * If device isolation is enabled, pre-allocate the protection
	 * domains for each device.
	 */
	if (amd_iommu_isolate)
		prealloc_protection_domains();

	iommu_detected = 1;
	force_iommu = 1;
	bad_dma_address = 0;
#ifdef CONFIG_GART_IOMMU
	gart_iommu_aperture_disabled = 1;
	gart_iommu_aperture = 0;
#endif

	/* Make the driver finally visible to the DMA layer and the IOMMU API */
	dma_ops = &amd_iommu_dma_ops;

	register_iommu(&amd_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	amd_iommu_stats_init();

	return 0;

free_domains:

	for_each_iommu(iommu) {
		if (iommu->default_dom)
			dma_ops_domain_free(iommu->default_dom);
	}

	return ret;
}
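/*
 * Note: bad_dma_address is set to 0 above, so DMA address zero doubles as
 * the error value returned by the mapping functions in this file; callers
 * compare returned handles against bad_dma_address to detect failure.
 */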
/*****************************************************************************
 *
 * The following functions belong to the exported interface of AMD IOMMU
 *
 * This interface allows access to lower level functions of the IOMMU
 * like protection domain handling and assignment of devices to domains,
 * which is not possible with the dma_ops interface.
 *
 *****************************************************************************/

static void cleanup_domain(struct protection_domain *domain)
{
	unsigned long flags;
	u16 devid;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		if (amd_iommu_pd_table[devid] == domain)
			__detach_device(domain, devid);

	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
static void protection_domain_free(struct protection_domain *domain)
{
	if (!domain)
		return;

	if (domain->id)
		domain_id_free(domain->id);

	kfree(domain);
}

static struct protection_domain *protection_domain_alloc(void)
{
	struct protection_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);
	domain->id = domain_id_alloc();
	if (!domain->id)
		goto out_err;

	return domain;

out_err:
	kfree(domain);

	return NULL;
}
static int amd_iommu_domain_init(struct iommu_domain *dom)
{
	struct protection_domain *domain;

	domain = protection_domain_alloc();
	if (!domain)
		goto out_free;

	domain->mode    = PAGE_MODE_3_LEVEL;
	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	if (!domain->pt_root)
		goto out_free;

	dom->priv = domain;

	return 0;

out_free:
	protection_domain_free(domain);

	return -ENOMEM;
}

static void amd_iommu_domain_destroy(struct iommu_domain *dom)
{
	struct protection_domain *domain = dom->priv;

	if (!domain)
		return;

	if (domain->dev_cnt > 0)
		cleanup_domain(domain);

	BUG_ON(domain->dev_cnt != 0);

	free_pagetable(domain);

	domain_id_free(domain->id);

	kfree(domain);

	dom->priv = NULL;
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
				    struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid > 0)
		detach_device(domain, devid);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return;

	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_completion_wait(iommu);
}

static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct protection_domain *old_domain;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return -EINVAL;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid >= amd_iommu_last_bdf ||
	    devid != amd_iommu_alias_table[devid])
		return -EINVAL;

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return -EINVAL;

	old_domain = domain_for_device(devid);
	if (old_domain)
		detach_device(old_domain, devid);

	attach_device(iommu, domain, devid);

	iommu_completion_wait(iommu);

	return 0;
}
static int amd_iommu_map_range(struct iommu_domain *dom,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int iommu_prot)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= IOMMU_PROT_IR;
	if (iommu_prot & IOMMU_WRITE)
		prot |= IOMMU_PROT_IW;

	iova  &= PAGE_MASK;
	paddr &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
		if (ret)
			return ret;

		iova  += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return 0;
}

static void amd_iommu_unmap_range(struct iommu_domain *dom,
				  unsigned long iova, size_t size)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);

	iova &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		iommu_unmap_page(domain, iova, PM_MAP_4k);
		iova += PAGE_SIZE;
	}

	iommu_flush_domain(domain->id);
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  unsigned long iova)
{
	struct protection_domain *domain = dom->priv;
	unsigned long offset = iova & ~PAGE_MASK;
	phys_addr_t paddr;
	u64 *pte;

	pte = fetch_pte(domain, iova, PM_MAP_4k);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	paddr  = *pte & IOMMU_PAGE_MASK;
	paddr |= offset;

	return paddr;
}

static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static struct iommu_ops amd_iommu_ops = {
	.domain_init = amd_iommu_domain_init,
	.domain_destroy = amd_iommu_domain_destroy,
	.attach_dev = amd_iommu_attach_device,
	.detach_dev = amd_iommu_detach_device,
	.map = amd_iommu_map_range,
	.unmap = amd_iommu_unmap_range,
	.iova_to_phys = amd_iommu_iova_to_phys,
	.domain_has_cap = amd_iommu_domain_has_cap,
};
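/*
 * Illustrative sketch, not part of the original driver: a user of the
 * generic IOMMU API (KVM device assignment is the typical one) reaches the
 * callbacks above through iommu_domain_alloc(), iommu_attach_device(),
 * iommu_map_range() and friends.  The helper name is hypothetical and the
 * function names follow the IOMMU API of this kernel generation.
 */
static int __maybe_unused example_iommu_api_user(struct device *dev,
						 struct page *page)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc();		/* -> amd_iommu_domain_init() */
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, dev);	/* -> amd_iommu_attach_device() */
	if (ret)
		goto out_free;

	/* map one page at IOVA 0 -> amd_iommu_map_range() */
	ret = iommu_map_range(dom, 0, page_to_phys(page), PAGE_SIZE,
			      IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	iommu_unmap_range(dom, 0, PAGE_SIZE);	/* -> amd_iommu_unmap_range() */

out_detach:
	iommu_detach_device(dom, dev);		/* -> amd_iommu_detach_device() */
out_free:
	iommu_domain_free(dom);			/* -> amd_iommu_domain_destroy() */
	return ret;
}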
/*****************************************************************************
 *
 * The next functions do a basic initialization of IOMMU for pass through
 * mode
 *
 * In passthrough mode the IOMMU is initialized and enabled but not used for
 * DMA-API translation.
 *
 *****************************************************************************/

int __init amd_iommu_init_passthrough(void)
{
	struct pci_dev *dev = NULL;
	u16 devid, devid2;

	/* allocate the passthrough domain */
	pt_domain = protection_domain_alloc();
	if (!pt_domain)
		return -ENOMEM;

	pt_domain->mode |= PAGE_MODE_NONE;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		struct amd_iommu *iommu;

		devid = calc_devid(dev->bus->number, dev->devfn);
		if (devid > amd_iommu_last_bdf)
			continue;

		devid2 = amd_iommu_alias_table[devid];

		iommu = amd_iommu_rlookup_table[devid2];
		if (!iommu)
			continue;

		__attach_device(iommu, pt_domain, devid);
		__attach_device(iommu, pt_domain, devid2);
	}

	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");

	return 0;
}
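/*
 * Note: both the device ID and its alias are attached above, so requests
 * arriving under either ID end up in the same passthrough domain.
 */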