amd_iommu_init.c

/*
 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <asm/pci-direct.h>
#include <asm/amd_iommu_proto.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 reserved;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));
bool amd_iommu_dump;

static int __initdata amd_iommu_detected;

u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
                                           to handle */
LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
                                           we find in ACPI */

#ifdef CONFIG_IOMMU_STRESS
bool amd_iommu_isolate = false;
#else
bool amd_iommu_isolate = true;          /* if true, device isolation is
                                           enabled */
#endif

bool amd_iommu_unmap_flush;             /* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
                                           system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs
 * it is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * The pd table (protection domain table) is used to find the protection domain
 * data structure a device belongs to. Indexed with the PCI device id too.
 */
struct protection_domain **amd_iommu_pd_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */
static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

        return 1UL << shift;
}
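
/*
 * Worked example for tbl_size() (illustrative only, assuming 4K pages and the
 * usual 32-byte device table entries): with amd_iommu_last_bdf at its maximum
 * of 0xffff, the raw size is (0xffff + 1) * 32 = 2 MB, get_order(2 MB) is 9,
 * so shift = 12 + 9 = 21 and the function returns 1UL << 21 = 2 MB. Rounding
 * up to a power-of-two page order slightly over-allocates for smaller
 * last_bdf values.
 */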
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                    &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                    &entry, sizeof(entry));
}
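
/*
 * Note on the size encoding above (illustrative): (dev_table_size >> 12) - 1
 * computes the table size in 4K pages minus one, which is placed in the low
 * bits of the device table base register; interpreting those bits that way is
 * an assumption about the register layout, not something defined in this
 * file. For the maximum 2 MB table the value is 512 - 1 = 0x1ff.
 */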
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
        printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
               dev_name(&iommu->dev->dev), iommu->cap_ptr);

        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        /* Disable event logging and event interrupts */
        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

        /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 * __init iommu_map_mmio_space(u64 address)
{
        u8 *ret;

        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
                return NULL;

        ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
        if (ret != NULL)
                return ret;

        release_mem_region(address, MMIO_REGION_LENGTH);

        return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
        return 0x04 << (*ivhd >> 6);
}
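
/*
 * Decoding of the expression above (illustrative): the top two bits of the
 * entry type select the entry size, so types 0x00-0x3f are 4 bytes long,
 * 0x40-0x7f are 8 bytes, 0x80-0xbf are 16 bytes and 0xc0-0xff are 32 bytes.
 * IVHD_DEV_ALIAS (0x42) and IVHD_DEV_EXT_SELECT (0x46), for example, are
 * parsed as 8-byte entries.
 */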
/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
        u32 cap;

        cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
        update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

        return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        p += sizeof(*h);
        end += h->length;

        find_last_devid_on_pci(PCI_BUS(h->devid),
                               PCI_SLOT(h->devid),
                               PCI_FUNC(h->devid),
                               h->cap_ptr);

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        WARN_ON(p != end);

        return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        /*
         * Validate checksum here so we don't need to do it when
         * we actually parse the table
         */
        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0)
                /* ACPI table corrupt */
                return -ENODEV;

        p += IVRS_HEADER_LENGTH;
        end += table->length;

        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (h->type) {
                case ACPI_IVHD_TYPE:
                        find_last_devid_from_ivhd(h);
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}
/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
        u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                             get_order(CMD_BUFFER_SIZE));

        if (cmd_buf == NULL)
                return NULL;

        iommu->cmd_buf_size = CMD_BUFFER_SIZE;

        return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->cmd_buf == NULL);

        entry = (u64)virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        amd_iommu_reset_cmd_buffer(iommu);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf,
                   get_order(iommu->cmd_buf_size));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
        iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                get_order(EVT_BUFFER_SIZE));

        if (iommu->evt_buf == NULL)
                return NULL;

        iommu->evt_buf_size = EVT_BUFFER_SIZE;

        return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->evt_buf == NULL);

        entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
}
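
/*
 * Worked example for the bit helpers above (illustrative, assuming the usual
 * 256-bit device table entry laid out as eight 32-bit data[] words): for bit
 * number 98, i = (98 >> 5) & 0x07 = 3 and _bit = 98 & 0x1f = 2, i.e. bit 2 of
 * data[3]. The & 0x07 mask keeps the word index within those eight words.
 */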
void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}
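
/*
 * Descriptive note on the workaround above: the two SysMgt bits of the device
 * table entry are combined into a 2-bit value, and when that value is 0b01 the
 * IW bit is also set for the device. Reading IW as the write-permission bit
 * follows the usual AMD IOMMU device table layout and is an assumption here,
 * not something this file defines.
 */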
/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                /*
                 * We only can configure exclusion ranges per IOMMU, not
                 * per device. But we can enable the exclusion range per
                 * device. This is done here
                 */
                set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}

/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc;

        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
                              &range);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
                              &misc);

        iommu->first_device = calc_devid(MMIO_GET_BUS(range),
                                         MMIO_GET_FD(range));
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
        iommu->evt_msi_num = MMIO_MSI_NUM(misc);
}
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                        struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
        u32 ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;

        /*
         * First set the recommended feature enable bits from ACPI
         * into the IOMMU control registers
         */
        h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        h->flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

        /*
         * Done. Now parse the device entries
         */
        p += sizeof(struct ivhd_header);
        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:
                        DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
                                    " last device %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(iommu->first_device),
                                    PCI_SLOT(iommu->first_device),
                                    PCI_FUNC(iommu->first_device),
                                    PCI_BUS(iommu->last_device),
                                    PCI_SLOT(iommu->last_device),
                                    PCI_FUNC(iommu->last_device),
                                    e->flags);

                        for (dev_i = iommu->first_device;
                             dev_i <= iommu->last_device; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:
                        DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:
                        DUMP_printk(" DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:
                        DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:
                        DUMP_printk(" DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:
                        DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:
                        DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:
                        DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                default:
                        break;
                }

                p += ivhd_entry_length(p);
        }
}
/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
        u16 i;

        for (i = iommu->first_device; i <= iommu->last_device; ++i)
                set_iommu_for_device(iommu, i);

        return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_command_buffer(iommu);
        free_event_buffer(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        for_each_iommu_safe(iommu, next) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        spin_lock_init(&iommu->lock);

        /* Add IOMMU to internal data structures */
        list_add_tail(&iommu->list, &amd_iommu_list);
        iommu->index = amd_iommus_present++;

        if (unlikely(iommu->index >= MAX_IOMMUS)) {
                WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
                return -ENOSYS;
        }

        /* Index is fine - add IOMMU to the array */
        amd_iommus[iommu->index] = iommu;

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
        if (!iommu->dev)
                return 1;

        iommu->cap_ptr = h->cap_ptr;
        iommu->pci_seg = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;
        iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
        if (!iommu->mmio_base)
                return -ENOMEM;

        iommu->cmd_buf = alloc_command_buffer(iommu);
        if (!iommu->cmd_buf)
                return -ENOMEM;

        iommu->evt_buf = alloc_event_buffer(iommu);
        if (!iommu->evt_buf)
                return -ENOMEM;

        iommu->int_enabled = false;

        init_iommu_from_pci(iommu);
        init_iommu_from_acpi(iommu, h);
        init_iommu_devices(iommu);

        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
                amd_iommu_np_cache = true;

        return pci_enable_device(iommu->dev);
}
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (*p) {
                case ACPI_IVHD_TYPE:
                        DUMP_printk("device: %02x:%02x.%01x cap: %04x "
                                    "seg: %d flags: %01x info %04x\n",
                                    PCI_BUS(h->devid), PCI_SLOT(h->devid),
                                    PCI_FUNC(h->devid), h->cap_ptr,
                                    h->pci_seg, h->flags, h->info);
                        DUMP_printk(" mmio-addr: %016llx\n",
                                    h->mmio_phys);

                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL)
                                return -ENOMEM;
                        ret = init_iommu_one(iommu, h);
                        if (ret)
                                return ret;
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}
/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
        int r;

        if (pci_enable_msi(iommu->dev))
                return 1;

        r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
                        IRQF_SAMPLE_RANDOM,
                        "AMD-Vi",
                        NULL);

        if (r) {
                pci_disable_msi(iommu->dev);
                return 1;
        }

        iommu->int_enabled = true;
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

        return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
        if (iommu->int_enabled)
                return 0;

        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
                return iommu_setup_msi(iommu);

        return 1;
}
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
        int i;

        switch (m->type) {
        case ACPI_IVMD_TYPE:
                set_device_exclusion_range(m->devid, m);
                break;
        case ACPI_IVMD_TYPE_ALL:
                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                        set_device_exclusion_range(i, m);
                break;
        case ACPI_IVMD_TYPE_RANGE:
                for (i = m->devid; i <= m->aux; ++i)
                        set_device_exclusion_range(i, m);
                break;
        default:
                break;
        }

        return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e = NULL;
        char *s;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        switch (m->type) {
        default:
                kfree(e);
                return 0;
        case ACPI_IVMD_TYPE:
                s = "IVMD_TYPE\t\t\t";
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                s = "IVMD_TYPE_ALL\t\t";
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                s = "IVMD_TYPE_RANGE\t\t";
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
        e->prot = m->flags >> 1;

        DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
                    " range_start: %016llx range_end: %016llx flags: %x\n", s,
                    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
                    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
                    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
                    e->address_start, e->address_end, m->flags);

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}
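
/*
 * Descriptive note on the protection value above: m->flags >> 1 shifts the
 * unity-map bit (bit 0) away, so e->prot keeps the permission bits that
 * follow it. Treating those bits as read/write permissions is an assumption
 * based on the usual IVMD flag layout, not something defined in this file.
 */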
/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & IVMD_FLAG_EXCL_RANGE)
                        init_exclusion_range(m);
                else if (m->flags & IVMD_FLAG_UNITY_MAP)
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table(void)
{
        u16 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
        }
}
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void enable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                iommu_disable(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
                iommu_set_exclusion_range(iommu);
                iommu_init_msi(iommu);
                iommu_enable(iommu);
        }
}
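
/*
 * Ordering note (descriptive, not normative): each IOMMU is first fully
 * disabled, then the device table base, command buffer, event buffer and
 * exclusion range are programmed and MSI is set up, and only as the last
 * step is translation switched on via CONTROL_IOMMU_EN. This avoids the
 * hardware fetching from half-programmed buffers; the same sequence is
 * reused by the resume path below.
 */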
static void disable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static int amd_iommu_resume(struct sys_device *dev)
{
        /* re-load the hardware */
        enable_iommus();

        /*
         * we have to flush after the IOMMUs are enabled because a
         * disabled IOMMU will never execute the commands we send
         */
        amd_iommu_flush_all_devices();
        amd_iommu_flush_all_domains();

        return 0;
}

static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
{
        /* disable IOMMUs to go out of the way for BIOS */
        disable_iommus();

        return 0;
}

static struct sysdev_class amd_iommu_sysdev_class = {
        .name = "amd_iommu",
        .suspend = amd_iommu_suspend,
        .resume = amd_iommu_resume,
};

static struct sys_device device_amd_iommu = {
        .id = 0,
        .cls = &amd_iommu_sysdev_class,
};

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *      1 pass) Find the highest PCI device id the driver has to handle.
 *              Upon this information the size of the data structures is
 *              determined that needs to be allocated.
 *
 *      2 pass) Initialize the data structures just allocated with the
 *              information in the ACPI table about available AMD IOMMUs
 *              in the system. It also maps the PCI devices in the
 *              system to specific IOMMUs
 *
 *      3 pass) After the basic data structures are allocated and
 *              initialized we update them with information about memory
 *              remapping requirements parsed out of the ACPI table in
 *              this last pass.
 *
 * After that the hardware is initialized and ready to go. In the last
 * step we do some Linux specific things like registering the driver in
 * the dma_ops interface and initializing the suspend/resume support
 * functions. Finally it prints some information about AMD IOMMUs and
 * the driver state and enables the hardware.
 */
static int __init amd_iommu_init(void)
{
        int i, ret = 0;

        /*
         * First parse ACPI tables to find the largest Bus/Dev/Func
         * we need to handle. Upon this information the shared data
         * structures for the IOMMUs in the system will be allocated
         */
        if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
                return -ENODEV;

        dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
        alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

        ret = -ENOMEM;

        /* Device table - directly used by all IOMMUs */
        amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                      get_order(dev_table_size));
        if (amd_iommu_dev_table == NULL)
                goto out;

        /*
         * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
         * IOMMU sees for that device
         */
        amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
                                        get_order(alias_table_size));
        if (amd_iommu_alias_table == NULL)
                goto free;

        /* IOMMU rlookup table - find the IOMMU for a specific device */
        amd_iommu_rlookup_table = (void *)__get_free_pages(
                        GFP_KERNEL | __GFP_ZERO,
                        get_order(rlookup_table_size));
        if (amd_iommu_rlookup_table == NULL)
                goto free;

        /*
         * Protection Domain table - maps devices to protection domains
         * This table has the same size as the rlookup_table
         */
        amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                     get_order(rlookup_table_size));
        if (amd_iommu_pd_table == NULL)
                goto free;

        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
                                            GFP_KERNEL | __GFP_ZERO,
                                            get_order(MAX_DOMAIN_ID/8));
        if (amd_iommu_pd_alloc_bitmap == NULL)
                goto free;

        /* init the device table */
        init_device_table();

        /*
         * let all alias entries point to themselves
         */
        for (i = 0; i <= amd_iommu_last_bdf; ++i)
                amd_iommu_alias_table[i] = i;

        /*
         * never allocate domain 0 because it's used as the non-allocated and
         * error value placeholder
         */
        amd_iommu_pd_alloc_bitmap[0] = 1;

        spin_lock_init(&amd_iommu_pd_lock);

        /*
         * now the data structures are allocated and basically initialized
         * start the real acpi table scan
         */
        ret = -ENODEV;
        if (acpi_table_parse("IVRS", init_iommu_all) != 0)
                goto free;

        if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
                goto free;

        ret = sysdev_class_register(&amd_iommu_sysdev_class);
        if (ret)
                goto free;

        ret = sysdev_register(&device_amd_iommu);
        if (ret)
                goto free;

        if (iommu_pass_through)
                ret = amd_iommu_init_passthrough();
        else
                ret = amd_iommu_init_dma_ops();
        if (ret)
                goto free;

        enable_iommus();

        if (iommu_pass_through)
                goto out;

        printk(KERN_INFO "AMD-Vi: device isolation ");
        if (amd_iommu_isolate)
                printk("enabled\n");
        else
                printk("disabled\n");

        if (amd_iommu_unmap_flush)
                printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
        else
                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

        x86_platform.iommu_shutdown = disable_iommus;
out:
        return ret;

free:
        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                   get_order(MAX_DOMAIN_ID/8));

        free_pages((unsigned long)amd_iommu_pd_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_rlookup_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_alias_table,
                   get_order(alias_table_size));

        free_pages((unsigned long)amd_iommu_dev_table,
                   get_order(dev_table_size));

        free_iommu_all();

        free_unity_maps();

        goto out;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
        return 0;
}

void __init amd_iommu_detect(void)
{
        if (no_iommu || (iommu_detected && !gart_iommu_aperture))
                return;

        if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
                iommu_detected = 1;
                amd_iommu_detected = 1;
                x86_init.iommu.iommu_init = amd_iommu_init;
        }
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
        amd_iommu_dump = true;

        return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "isolate", 7) == 0)
                        amd_iommu_isolate = true;
                if (strncmp(str, "share", 5) == 0)
                        amd_iommu_isolate = false;
                if (strncmp(str, "fullflush", 9) == 0)
                        amd_iommu_unmap_flush = true;
        }

        return 1;
}
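
/*
 * Usage example (illustrative): the options registered below are parsed from
 * the kernel command line, e.g. booting with "amd_iommu_dump" enables verbose
 * dumping of the ACPI table contents via DUMP_printk, and "amd_iommu=fullflush"
 * selects an IO/TLB flush on every unmap instead of lazy flushing.
 */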
__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);