/* intel-iommu.h */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */
  21. #ifndef _INTEL_IOMMU_H_
  22. #define _INTEL_IOMMU_H_
  23. #include <linux/types.h>
  24. #include <linux/msi.h>
  25. #include <linux/sysdev.h>
  26. #include "iova.h"
  27. #include <linux/io.h>
  28. /*
  29. * We need a fixed PAGE_SIZE of 4K irrespective of
  30. * arch PAGE_SIZE for IOMMU page tables.
  31. */
  32. #define PAGE_SHIFT_4K (12)
  33. #define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
  34. #define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
  35. #define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
  36. #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
  37. #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
  38. #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
  39. /*
  40. * Intel IOMMU register specification per version 1.0 public spec.
  41. */
  42. #define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
  43. #define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
  44. #define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
  45. #define DMAR_GCMD_REG 0x18 /* Global command register */
  46. #define DMAR_GSTS_REG 0x1c /* Global status register */
  47. #define DMAR_RTADDR_REG 0x20 /* Root entry table */
  48. #define DMAR_CCMD_REG 0x28 /* Context command reg */
  49. #define DMAR_FSTS_REG 0x34 /* Fault Status register */
  50. #define DMAR_FECTL_REG 0x38 /* Fault control register */
  51. #define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
  52. #define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
  53. #define DMAR_FEUADDR_REG 0x44 /* Upper address register */
  54. #define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
  55. #define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
  56. #define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
  57. #define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
  58. #define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
  59. #define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
  60. #define OFFSET_STRIDE (9)
/*
 * Dead reference implementation kept for documentation only; superseded
 * by the dmar_readq()/dmar_writeq() inline functions:
 *
 * #define dmar_readl(dmar, reg) readl(dmar + reg)
 * #define dmar_readq(dmar, reg) ({ \
 *	u32 lo, hi; \
 *	lo = readl(dmar + reg); \
 *	hi = readl(dmar + reg + 4); \
 *	(((u64) hi) << 32) + lo; })
 */
  69. static inline u64 dmar_readq(void __iomem *addr)
  70. {
  71. u32 lo, hi;
  72. lo = readl(addr);
  73. hi = readl(addr + 4);
  74. return (((u64) hi) << 32) + lo;
  75. }
  76. static inline void dmar_writeq(void __iomem *addr, u64 val)
  77. {
  78. writel((u32)val, addr);
  79. writel((u32)(val >> 32), addr + 4);
  80. }
  81. #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
  82. #define DMAR_VER_MINOR(v) ((v) & 0x0f)
  83. /*
  84. * Decoding Capability Register
  85. */
  86. #define cap_read_drain(c) (((c) >> 55) & 1)
  87. #define cap_write_drain(c) (((c) >> 54) & 1)
  88. #define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
  89. #define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
  90. #define cap_pgsel_inv(c) (((c) >> 39) & 1)
  91. #define cap_super_page_val(c) (((c) >> 34) & 0xf)
  92. #define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
  93. * OFFSET_STRIDE) + 21)
  94. #define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
  95. #define cap_max_fault_reg_offset(c) \
  96. (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
  97. #define cap_zlr(c) (((c) >> 22) & 1)
  98. #define cap_isoch(c) (((c) >> 23) & 1)
  99. #define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
  100. #define cap_sagaw(c) (((c) >> 8) & 0x1f)
  101. #define cap_caching_mode(c) (((c) >> 7) & 1)
  102. #define cap_phmr(c) (((c) >> 6) & 1)
  103. #define cap_plmr(c) (((c) >> 5) & 1)
  104. #define cap_rwbf(c) (((c) >> 4) & 1)
  105. #define cap_afl(c) (((c) >> 3) & 1)
  106. #define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
  107. /*
  108. * Extended Capability Register
  109. */
  110. #define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
  111. #define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
  112. #define ecap_max_iotlb_offset(e) \
  113. (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
  114. #define ecap_coherent(e) ((e) & 0x1)
  115. /* IOTLB_REG */
  116. #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
  117. #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
  118. #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
  119. #define DMA_TLB_IIRG(type) ((type >> 60) & 7)
  120. #define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
  121. #define DMA_TLB_READ_DRAIN (((u64)1) << 49)
  122. #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
  123. #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
  124. #define DMA_TLB_IVT (((u64)1) << 63)
  125. #define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
  126. #define DMA_TLB_MAX_SIZE (0x3f)
  127. /* PMEN_REG */
  128. #define DMA_PMEN_EPM (((u32)1)<<31)
  129. #define DMA_PMEN_PRS (((u32)1)<<0)
  130. /* GCMD_REG */
  131. #define DMA_GCMD_TE (((u32)1) << 31)
  132. #define DMA_GCMD_SRTP (((u32)1) << 30)
  133. #define DMA_GCMD_SFL (((u32)1) << 29)
  134. #define DMA_GCMD_EAFL (((u32)1) << 28)
  135. #define DMA_GCMD_WBF (((u32)1) << 27)
  136. /* GSTS_REG */
  137. #define DMA_GSTS_TES (((u32)1) << 31)
  138. #define DMA_GSTS_RTPS (((u32)1) << 30)
  139. #define DMA_GSTS_FLS (((u32)1) << 29)
  140. #define DMA_GSTS_AFLS (((u32)1) << 28)
  141. #define DMA_GSTS_WBFS (((u32)1) << 27)
  142. /* CCMD_REG */
  143. #define DMA_CCMD_ICC (((u64)1) << 63)
  144. #define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
  145. #define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
  146. #define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
  147. #define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
  148. #define DMA_CCMD_MASK_NOBIT 0
  149. #define DMA_CCMD_MASK_1BIT 1
  150. #define DMA_CCMD_MASK_2BIT 2
  151. #define DMA_CCMD_MASK_3BIT 3
  152. #define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
  153. #define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
  154. /* FECTL_REG */
  155. #define DMA_FECTL_IM (((u32)1) << 31)
  156. /* FSTS_REG */
  157. #define DMA_FSTS_PPF ((u32)2)
  158. #define DMA_FSTS_PFO ((u32)1)
  159. #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
  160. /* FRCD_REG, 32 bits access */
  161. #define DMA_FRCD_F (((u32)1) << 31)
  162. #define dma_frcd_type(d) ((d >> 30) & 1)
  163. #define dma_frcd_fault_reason(c) (c & 0xff)
  164. #define dma_frcd_source_id(c) (c & 0xffff)
  165. #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
/*
 * Root entry layout:
 *  0:      Present
 *  1-11:   Reserved
 *  12-63:  Context Ptr (12 - (haw-1))
 *  64-127: Reserved
 */
  172. struct root_entry {
  173. u64 val;
  174. u64 rsvd1;
  175. };
  176. #define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
  177. static inline bool root_present(struct root_entry *root)
  178. {
  179. return (root->val & 1);
  180. }
  181. static inline void set_root_present(struct root_entry *root)
  182. {
  183. root->val |= 1;
  184. }
  185. static inline void set_root_value(struct root_entry *root, unsigned long value)
  186. {
  187. root->val |= value & PAGE_MASK_4K;
  188. }
  189. struct context_entry;
  190. static inline struct context_entry *
  191. get_context_addr_from_root(struct root_entry *root)
  192. {
  193. return (struct context_entry *)
  194. (root_present(root)?phys_to_virt(
  195. root->val & PAGE_MASK_4K):
  196. NULL);
  197. }
/*
 * Context entry layout.
 * Low 64 bits:
 *  0:     present
 *  1:     fault processing disable
 *  2-3:   translation type
 *  12-63: address space root
 * High 64 bits:
 *  0-2:   address width
 *  3-6:   aval
 *  8-23:  domain id
 */
  209. struct context_entry {
  210. u64 lo;
  211. u64 hi;
  212. };
  213. #define context_present(c) ((c).lo & 1)
  214. #define context_fault_disable(c) (((c).lo >> 1) & 1)
  215. #define context_translation_type(c) (((c).lo >> 2) & 3)
  216. #define context_address_root(c) ((c).lo & PAGE_MASK_4K)
  217. #define context_address_width(c) ((c).hi & 7)
  218. #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
  219. #define context_set_present(c) do {(c).lo |= 1;} while (0)
  220. #define context_set_fault_enable(c) \
  221. do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
  222. #define context_set_translation_type(c, val) \
  223. do { \
  224. (c).lo &= (((u64)-1) << 4) | 3; \
  225. (c).lo |= ((val) & 3) << 2; \
  226. } while (0)
  227. #define CONTEXT_TT_MULTI_LEVEL 0
  228. #define context_set_address_root(c, val) \
  229. do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
  230. #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
  231. #define context_set_domain_id(c, val) \
  232. do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
  233. #define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
/*
 * DMA page-table entry layout:
 *  0:     readable
 *  1:     writable
 *  2-6:   reserved
 *  7:     super page
 *  8-11:  available
 *  12-63: host physical address
 */
  242. struct dma_pte {
  243. u64 val;
  244. };
  245. #define dma_clear_pte(p) do {(p).val = 0;} while (0)
  246. #define DMA_PTE_READ (1)
  247. #define DMA_PTE_WRITE (2)
  248. #define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
  249. #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
  250. #define dma_set_pte_prot(p, prot) \
  251. do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
  252. #define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
  253. #define dma_set_pte_addr(p, addr) do {\
  254. (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
  255. #define dma_pte_present(p) (((p).val & 3) != 0)
  256. struct intel_iommu;
  257. struct dmar_domain {
  258. int id; /* domain id */
  259. struct intel_iommu *iommu; /* back pointer to owning iommu */
  260. struct list_head devices; /* all devices' list */
  261. struct iova_domain iovad; /* iova's that belong to this domain */
  262. struct dma_pte *pgd; /* virtual address */
  263. spinlock_t mapping_lock; /* page table lock */
  264. int gaw; /* max guest address width */
  265. /* adjusted guest address width, 0 is level 2 30-bit */
  266. int agaw;
  267. #define DOMAIN_FLAG_MULTIPLE_DEVICES 1
  268. int flags;
  269. };
  270. /* PCI domain-device relationship */
  271. struct device_domain_info {
  272. struct list_head link; /* link to domain siblings */
  273. struct list_head global; /* link to global list */
  274. u8 bus; /* PCI bus numer */
  275. u8 devfn; /* PCI devfn number */
  276. struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
  277. struct dmar_domain *domain; /* pointer to domain */
  278. };
  279. extern int init_dmars(void);
  280. struct intel_iommu {
  281. void __iomem *reg; /* Pointer to hardware regs, virtual addr */
  282. u64 cap;
  283. u64 ecap;
  284. unsigned long *domain_ids; /* bitmap of domains */
  285. struct dmar_domain **domains; /* ptr to domains */
  286. int seg;
  287. u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
  288. spinlock_t lock; /* protect context, domain ids */
  289. spinlock_t register_lock; /* protect register handling */
  290. struct root_entry *root_entry; /* virtual address */
  291. unsigned int irq;
  292. unsigned char name[7]; /* Device Name */
  293. struct msi_msg saved_msg;
  294. struct sys_device sysdev;
  295. };
  296. #ifndef CONFIG_DMAR_GFX_WA
  297. static inline void iommu_prepare_gfx_mapping(void)
  298. {
  299. return;
  300. }
  301. #endif /* !CONFIG_DMAR_GFX_WA */
  302. #endif