/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */
  28. #include <linux/pci.h>
  29. #include <linux/dmar.h>
  30. #include "iova.h"
  31. #include "intel-iommu.h"
#undef PREFIX
#define PREFIX "DMAR:"

/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units are not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);	/* all DRHD units; INCLUDE_ALL entry kept at tail */
LIST_HEAD(dmar_rmrr_units);	/* all RMRR (reserved memory region) units */

/* DMAR ACPI table mapped by early_dmar_detect(), consumed by parse_dmar_table() */
static struct acpi_table_header * __initdata dmar_tbl;
  41. static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
  42. {
  43. /*
  44. * add INCLUDE_ALL at the tail, so scan the list will find it at
  45. * the very end.
  46. */
  47. if (drhd->include_all)
  48. list_add_tail(&drhd->list, &dmar_drhd_units);
  49. else
  50. list_add(&drhd->list, &dmar_drhd_units);
  51. }
/* Insert a newly parsed RMRR unit into the global RMRR list. */
static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}
  56. static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
  57. struct pci_dev **dev, u16 segment)
  58. {
  59. struct pci_bus *bus;
  60. struct pci_dev *pdev = NULL;
  61. struct acpi_dmar_pci_path *path;
  62. int count;
  63. bus = pci_find_bus(segment, scope->bus);
  64. path = (struct acpi_dmar_pci_path *)(scope + 1);
  65. count = (scope->length - sizeof(struct acpi_dmar_device_scope))
  66. / sizeof(struct acpi_dmar_pci_path);
  67. while (count) {
  68. if (pdev)
  69. pci_dev_put(pdev);
  70. /*
  71. * Some BIOSes list non-exist devices in DMAR table, just
  72. * ignore it
  73. */
  74. if (!bus) {
  75. printk(KERN_WARNING
  76. PREFIX "Device scope bus [%d] not found\n",
  77. scope->bus);
  78. break;
  79. }
  80. pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
  81. if (!pdev) {
  82. printk(KERN_WARNING PREFIX
  83. "Device scope device [%04x:%02x:%02x.%02x] not found\n",
  84. segment, bus->number, path->dev, path->fn);
  85. break;
  86. }
  87. path ++;
  88. count --;
  89. bus = pdev->subordinate;
  90. }
  91. if (!pdev) {
  92. printk(KERN_WARNING PREFIX
  93. "Device scope device [%04x:%02x:%02x.%02x] not found\n",
  94. segment, scope->bus, path->dev, path->fn);
  95. *dev = NULL;
  96. return 0;
  97. }
  98. if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
  99. pdev->subordinate) || (scope->entry_type == \
  100. ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
  101. pci_dev_put(pdev);
  102. printk(KERN_WARNING PREFIX
  103. "Device scope type does not match for %s\n",
  104. pci_name(pdev));
  105. return -EINVAL;
  106. }
  107. *dev = pdev;
  108. return 0;
  109. }
  110. static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
  111. struct pci_dev ***devices, u16 segment)
  112. {
  113. struct acpi_dmar_device_scope *scope;
  114. void * tmp = start;
  115. int index;
  116. int ret;
  117. *cnt = 0;
  118. while (start < end) {
  119. scope = start;
  120. if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
  121. scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
  122. (*cnt)++;
  123. else
  124. printk(KERN_WARNING PREFIX
  125. "Unsupported device scope\n");
  126. start += scope->length;
  127. }
  128. if (*cnt == 0)
  129. return 0;
  130. *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
  131. if (!*devices)
  132. return -ENOMEM;
  133. start = tmp;
  134. index = 0;
  135. while (start < end) {
  136. scope = start;
  137. if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
  138. scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
  139. ret = dmar_parse_one_dev_scope(scope,
  140. &(*devices)[index], segment);
  141. if (ret) {
  142. kfree(*devices);
  143. return ret;
  144. }
  145. index ++;
  146. }
  147. start += scope->length;
  148. }
  149. return 0;
  150. }
  151. /**
  152. * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
  153. * structure which uniquely represent one DMA remapping hardware unit
  154. * present in the platform
  155. */
  156. static int __init
  157. dmar_parse_one_drhd(struct acpi_dmar_header *header)
  158. {
  159. struct acpi_dmar_hardware_unit *drhd;
  160. struct dmar_drhd_unit *dmaru;
  161. int ret = 0;
  162. static int include_all;
  163. dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
  164. if (!dmaru)
  165. return -ENOMEM;
  166. drhd = (struct acpi_dmar_hardware_unit *)header;
  167. dmaru->reg_base_addr = drhd->address;
  168. dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
  169. if (!dmaru->include_all)
  170. ret = dmar_parse_dev_scope((void *)(drhd + 1),
  171. ((void *)drhd) + header->length,
  172. &dmaru->devices_cnt, &dmaru->devices,
  173. drhd->segment);
  174. else {
  175. /* Only allow one INCLUDE_ALL */
  176. if (include_all) {
  177. printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
  178. "device scope is allowed\n");
  179. ret = -EINVAL;
  180. }
  181. include_all = 1;
  182. }
  183. if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all))
  184. kfree(dmaru);
  185. else
  186. dmar_register_drhd_unit(dmaru);
  187. return ret;
  188. }
  189. static int __init
  190. dmar_parse_one_rmrr(struct acpi_dmar_header *header)
  191. {
  192. struct acpi_dmar_reserved_memory *rmrr;
  193. struct dmar_rmrr_unit *rmrru;
  194. int ret = 0;
  195. rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
  196. if (!rmrru)
  197. return -ENOMEM;
  198. rmrr = (struct acpi_dmar_reserved_memory *)header;
  199. rmrru->base_address = rmrr->base_address;
  200. rmrru->end_address = rmrr->end_address;
  201. ret = dmar_parse_dev_scope((void *)(rmrr + 1),
  202. ((void *)rmrr) + header->length,
  203. &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
  204. if (ret || (rmrru->devices_cnt == 0))
  205. kfree(rmrru);
  206. else
  207. dmar_register_rmrr_unit(rmrru);
  208. return ret;
  209. }
  210. static void __init
  211. dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
  212. {
  213. struct acpi_dmar_hardware_unit *drhd;
  214. struct acpi_dmar_reserved_memory *rmrr;
  215. switch (header->type) {
  216. case ACPI_DMAR_TYPE_HARDWARE_UNIT:
  217. drhd = (struct acpi_dmar_hardware_unit *)header;
  218. printk (KERN_INFO PREFIX
  219. "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
  220. drhd->flags, drhd->address);
  221. break;
  222. case ACPI_DMAR_TYPE_RESERVED_MEMORY:
  223. rmrr = (struct acpi_dmar_reserved_memory *)header;
  224. printk (KERN_INFO PREFIX
  225. "RMRR base: 0x%016Lx end: 0x%016Lx\n",
  226. rmrr->base_address, rmrr->end_address);
  227. break;
  228. }
  229. }
  230. /**
  231. * parse_dmar_table - parses the DMA reporting table
  232. */
  233. static int __init
  234. parse_dmar_table(void)
  235. {
  236. struct acpi_table_dmar *dmar;
  237. struct acpi_dmar_header *entry_header;
  238. int ret = 0;
  239. dmar = (struct acpi_table_dmar *)dmar_tbl;
  240. if (!dmar)
  241. return -ENODEV;
  242. if (dmar->width < PAGE_SHIFT_4K - 1) {
  243. printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
  244. return -EINVAL;
  245. }
  246. printk (KERN_INFO PREFIX "Host address width %d\n",
  247. dmar->width + 1);
  248. entry_header = (struct acpi_dmar_header *)(dmar + 1);
  249. while (((unsigned long)entry_header) <
  250. (((unsigned long)dmar) + dmar_tbl->length)) {
  251. dmar_table_print_dmar_entry(entry_header);
  252. switch (entry_header->type) {
  253. case ACPI_DMAR_TYPE_HARDWARE_UNIT:
  254. ret = dmar_parse_one_drhd(entry_header);
  255. break;
  256. case ACPI_DMAR_TYPE_RESERVED_MEMORY:
  257. ret = dmar_parse_one_rmrr(entry_header);
  258. break;
  259. default:
  260. printk(KERN_WARNING PREFIX
  261. "Unknown DMAR structure type\n");
  262. ret = 0; /* for forward compatibility */
  263. break;
  264. }
  265. if (ret)
  266. break;
  267. entry_header = ((void *)entry_header + entry_header->length);
  268. }
  269. return ret;
  270. }
  271. int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
  272. struct pci_dev *dev)
  273. {
  274. int index;
  275. while (dev) {
  276. for (index = 0; index < cnt; index++)
  277. if (dev == devices[index])
  278. return 1;
  279. /* Check our parent */
  280. dev = dev->bus->self;
  281. }
  282. return 0;
  283. }
/*
 * Find the DRHD unit whose device scope covers @dev.
 *
 * Because the INCLUDE_ALL unit is kept at the tail of dmar_drhd_units
 * (see dmar_register_drhd_unit), specific matches are found before the
 * catch-all entry.  Returns NULL if no unit matches.
 */
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd = NULL;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
						drhd->devices_cnt, dev))
			return drhd;
	}

	return NULL;
}
  295. int __init dmar_table_init(void)
  296. {
  297. int ret;
  298. ret = parse_dmar_table();
  299. if (ret) {
  300. printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
  301. return ret;
  302. }
  303. if (list_empty(&dmar_drhd_units)) {
  304. printk(KERN_INFO PREFIX "No DMAR devices found\n");
  305. return -ENODEV;
  306. }
  307. if (list_empty(&dmar_rmrr_units)) {
  308. printk(KERN_INFO PREFIX "No RMRR found\n");
  309. return -ENODEV;
  310. }
  311. return 0;
  312. }
  313. /**
  314. * early_dmar_detect - checks to see if the platform supports DMAR devices
  315. */
  316. int __init early_dmar_detect(void)
  317. {
  318. acpi_status status = AE_OK;
  319. /* if we could find DMAR table, then there are DMAR devices */
  320. status = acpi_get_table(ACPI_SIG_DMAR, 0,
  321. (struct acpi_table_header **)&dmar_tbl);
  322. if (ACPI_SUCCESS(status) && !dmar_tbl) {
  323. printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
  324. status = AE_NOT_FOUND;
  325. }
  326. return (ACPI_SUCCESS(status) ? 1 : 0);
  327. }
/*
 * Map the register window of one DMA remapping hardware unit and read
 * its capability registers into @iommu, linking it to @drhd.
 *
 * NOTE(review): on failure this kfree()s the caller-supplied @iommu and
 * returns NULL — callers must not free it again; confirm against callers.
 */
struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
				struct dmar_drhd_unit *drhd)
{
	int map_size;
	u32 ver;

	/* Map one 4K page first; CAP/ECAP live within that window. */
	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = PAGE_ALIGN_4K(map_size);
	if (map_size > PAGE_SIZE_4K) {
		/* Remap with the full size the capability registers report. */
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		iommu->cap, iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return iommu;
error:
	kfree(iommu);
	return NULL;
}
/*
 * Undo alloc_iommu(): unmap the register window (if mapped) and free the
 * structure.  Safe to call with NULL.
 */
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	/* DMA-remapping-specific teardown; only built when DMAR is enabled. */
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}