/* intel-iommu.h */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
 * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */
  20. #ifndef _INTEL_IOMMU_H_
  21. #define _INTEL_IOMMU_H_
  22. #include <linux/types.h>
  23. #include <linux/msi.h>
  24. #include "iova.h"
  25. #include <linux/io.h>
  26. /*
  27. * Intel IOMMU register specification per version 1.0 public spec.
  28. */
  29. #define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
  30. #define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
  31. #define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
  32. #define DMAR_GCMD_REG 0x18 /* Global command register */
  33. #define DMAR_GSTS_REG 0x1c /* Global status register */
  34. #define DMAR_RTADDR_REG 0x20 /* Root entry table */
  35. #define DMAR_CCMD_REG 0x28 /* Context command reg */
  36. #define DMAR_FSTS_REG 0x34 /* Fault Status register */
  37. #define DMAR_FECTL_REG 0x38 /* Fault control register */
  38. #define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
  39. #define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
  40. #define DMAR_FEUADDR_REG 0x44 /* Upper address register */
  41. #define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
  42. #define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
  43. #define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
  44. #define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
  45. #define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
  46. #define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
  47. #define OFFSET_STRIDE (9)
  48. /*
  49. #define dmar_readl(dmar, reg) readl(dmar + reg)
  50. #define dmar_readq(dmar, reg) ({ \
  51. u32 lo, hi; \
  52. lo = readl(dmar + reg); \
  53. hi = readl(dmar + reg + 4); \
  54. (((u64) hi) << 32) + lo; })
  55. */
  56. static inline u64 dmar_readq(void __iomem *addr)
  57. {
  58. u32 lo, hi;
  59. lo = readl(addr);
  60. hi = readl(addr + 4);
  61. return (((u64) hi) << 32) + lo;
  62. }
  63. static inline void dmar_writeq(void __iomem *addr, u64 val)
  64. {
  65. writel((u32)val, addr);
  66. writel((u32)(val >> 32), addr + 4);
  67. }
  68. #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
  69. #define DMAR_VER_MINOR(v) ((v) & 0x0f)
  70. /*
  71. * Decoding Capability Register
  72. */
  73. #define cap_read_drain(c) (((c) >> 55) & 1)
  74. #define cap_write_drain(c) (((c) >> 54) & 1)
  75. #define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
  76. #define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
  77. #define cap_pgsel_inv(c) (((c) >> 39) & 1)
  78. #define cap_super_page_val(c) (((c) >> 34) & 0xf)
  79. #define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
  80. * OFFSET_STRIDE) + 21)
  81. #define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
  82. #define cap_max_fault_reg_offset(c) \
  83. (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
  84. #define cap_zlr(c) (((c) >> 22) & 1)
  85. #define cap_isoch(c) (((c) >> 23) & 1)
  86. #define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
  87. #define cap_sagaw(c) (((c) >> 8) & 0x1f)
  88. #define cap_caching_mode(c) (((c) >> 7) & 1)
  89. #define cap_phmr(c) (((c) >> 6) & 1)
  90. #define cap_plmr(c) (((c) >> 5) & 1)
  91. #define cap_rwbf(c) (((c) >> 4) & 1)
  92. #define cap_afl(c) (((c) >> 3) & 1)
  93. #define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
  94. /*
  95. * Extended Capability Register
  96. */
  97. #define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
  98. #define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
  99. #define ecap_max_iotlb_offset(e) \
  100. (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
  101. #define ecap_coherent(e) ((e) & 0x1)
  102. /* IOTLB_REG */
  103. #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
  104. #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
  105. #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
  106. #define DMA_TLB_IIRG(type) ((type >> 60) & 7)
  107. #define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
  108. #define DMA_TLB_READ_DRAIN (((u64)1) << 49)
  109. #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
  110. #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
  111. #define DMA_TLB_IVT (((u64)1) << 63)
  112. #define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
  113. #define DMA_TLB_MAX_SIZE (0x3f)
  114. /* GCMD_REG */
  115. #define DMA_GCMD_TE (((u32)1) << 31)
  116. #define DMA_GCMD_SRTP (((u32)1) << 30)
  117. #define DMA_GCMD_SFL (((u32)1) << 29)
  118. #define DMA_GCMD_EAFL (((u32)1) << 28)
  119. #define DMA_GCMD_WBF (((u32)1) << 27)
  120. /* GSTS_REG */
  121. #define DMA_GSTS_TES (((u32)1) << 31)
  122. #define DMA_GSTS_RTPS (((u32)1) << 30)
  123. #define DMA_GSTS_FLS (((u32)1) << 29)
  124. #define DMA_GSTS_AFLS (((u32)1) << 28)
  125. #define DMA_GSTS_WBFS (((u32)1) << 27)
  126. /* CCMD_REG */
  127. #define DMA_CCMD_ICC (((u64)1) << 63)
  128. #define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
  129. #define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
  130. #define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
  131. #define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
  132. #define DMA_CCMD_MASK_NOBIT 0
  133. #define DMA_CCMD_MASK_1BIT 1
  134. #define DMA_CCMD_MASK_2BIT 2
  135. #define DMA_CCMD_MASK_3BIT 3
  136. #define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
  137. #define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
  138. /* FECTL_REG */
  139. #define DMA_FECTL_IM (((u32)1) << 31)
  140. /* FSTS_REG */
  141. #define DMA_FSTS_PPF ((u32)2)
  142. #define DMA_FSTS_PFO ((u32)1)
  143. #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
  144. /* FRCD_REG, 32 bits access */
  145. #define DMA_FRCD_F (((u32)1) << 31)
  146. #define dma_frcd_type(d) ((d >> 30) & 1)
  147. #define dma_frcd_fault_reason(c) (c & 0xff)
  148. #define dma_frcd_source_id(c) (c & 0xffff)
  149. #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
  150. /*
  151. * 0: Present
  152. * 1-11: Reserved
  153. * 12-63: Context Ptr (12 - (haw-1))
  154. * 64-127: Reserved
  155. */
  156. struct root_entry {
  157. u64 val;
  158. u64 rsvd1;
  159. };
  160. #define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
  161. static inline bool root_present(struct root_entry *root)
  162. {
  163. return (root->val & 1);
  164. }
  165. static inline void set_root_present(struct root_entry *root)
  166. {
  167. root->val |= 1;
  168. }
  169. static inline void set_root_value(struct root_entry *root, unsigned long value)
  170. {
  171. root->val |= value & PAGE_MASK_4K;
  172. }
  173. struct context_entry;
  174. static inline struct context_entry *
  175. get_context_addr_from_root(struct root_entry *root)
  176. {
  177. return (struct context_entry *)
  178. (root_present(root)?phys_to_virt(
  179. root->val & PAGE_MASK_4K):
  180. NULL);
  181. }
  182. /*
  183. * low 64 bits:
  184. * 0: present
  185. * 1: fault processing disable
  186. * 2-3: translation type
  187. * 12-63: address space root
  188. * high 64 bits:
  189. * 0-2: address width
  190. * 3-6: aval
  191. * 8-23: domain id
  192. */
  193. struct context_entry {
  194. u64 lo;
  195. u64 hi;
  196. };
  197. #define context_present(c) ((c).lo & 1)
  198. #define context_fault_disable(c) (((c).lo >> 1) & 1)
  199. #define context_translation_type(c) (((c).lo >> 2) & 3)
  200. #define context_address_root(c) ((c).lo & PAGE_MASK_4K)
  201. #define context_address_width(c) ((c).hi & 7)
  202. #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
  203. #define context_set_present(c) do {(c).lo |= 1;} while (0)
  204. #define context_set_fault_enable(c) \
  205. do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
  206. #define context_set_translation_type(c, val) \
  207. do { \
  208. (c).lo &= (((u64)-1) << 4) | 3; \
  209. (c).lo |= ((val) & 3) << 2; \
  210. } while (0)
  211. #define CONTEXT_TT_MULTI_LEVEL 0
  212. #define context_set_address_root(c, val) \
  213. do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
  214. #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
  215. #define context_set_domain_id(c, val) \
  216. do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
  217. #define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
  218. /*
  219. * 0: readable
  220. * 1: writable
  221. * 2-6: reserved
  222. * 7: super page
  223. * 8-11: available
  224. * 12-63: Host physcial address
  225. */
  226. struct dma_pte {
  227. u64 val;
  228. };
  229. #define dma_clear_pte(p) do {(p).val = 0;} while (0)
  230. #define DMA_PTE_READ (1)
  231. #define DMA_PTE_WRITE (2)
  232. #define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
  233. #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
  234. #define dma_set_pte_prot(p, prot) \
  235. do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
  236. #define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
  237. #define dma_set_pte_addr(p, addr) do {\
  238. (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
  239. #define dma_pte_present(p) (((p).val & 3) != 0)
  240. struct intel_iommu;
  241. struct dmar_domain {
  242. int id; /* domain id */
  243. struct intel_iommu *iommu; /* back pointer to owning iommu */
  244. struct list_head devices; /* all devices' list */
  245. struct iova_domain iovad; /* iova's that belong to this domain */
  246. struct dma_pte *pgd; /* virtual address */
  247. spinlock_t mapping_lock; /* page table lock */
  248. int gaw; /* max guest address width */
  249. /* adjusted guest address width, 0 is level 2 30-bit */
  250. int agaw;
  251. #define DOMAIN_FLAG_MULTIPLE_DEVICES 1
  252. int flags;
  253. };
  254. /* PCI domain-device relationship */
  255. struct device_domain_info {
  256. struct list_head link; /* link to domain siblings */
  257. struct list_head global; /* link to global list */
  258. u8 bus; /* PCI bus numer */
  259. u8 devfn; /* PCI devfn number */
  260. struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
  261. struct dmar_domain *domain; /* pointer to domain */
  262. };
  263. extern int init_dmars(void);
  264. struct intel_iommu {
  265. void __iomem *reg; /* Pointer to hardware regs, virtual addr */
  266. u64 cap;
  267. u64 ecap;
  268. unsigned long *domain_ids; /* bitmap of domains */
  269. struct dmar_domain **domains; /* ptr to domains */
  270. int seg;
  271. u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
  272. spinlock_t lock; /* protect context, domain ids */
  273. spinlock_t register_lock; /* protect register handling */
  274. struct root_entry *root_entry; /* virtual address */
  275. unsigned int irq;
  276. unsigned char name[7]; /* Device Name */
  277. struct msi_msg saved_msg;
  278. struct sys_device sysdev;
  279. };
  280. #ifndef CONFIG_DMAR_GFX_WA
  281. static inline void iommu_prepare_gfx_mapping(void)
  282. {
  283. return;
  284. }
  285. #endif /* !CONFIG_DMAR_GFX_WA */
  286. #endif