enlighten.c

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/mm.h>

struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

static __read_mostly int xen_events_irq = -1;

/* map fgmfn of domid to lpfn in the current domain */
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
                            unsigned int domid)
{
        int rc;
        struct xen_add_to_physmap_range xatp = {
                .domid = DOMID_SELF,
                .foreign_domid = domid,
                .size = 1,
                .space = XENMAPSPACE_gmfn_foreign,
        };
        xen_ulong_t idx = fgmfn;
        xen_pfn_t gpfn = lpfn;

        set_xen_guest_handle(xatp.idxs, &idx);
        set_xen_guest_handle(xatp.gpfns, &gpfn);

        rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
        if (rc) {
                pr_warn("Failed to map pfn to mfn rc:%d pfn:%lx mfn:%lx\n",
                        rc, lpfn, fgmfn);
                return 1;
        }
        return 0;
}

struct remap_data {
        xen_pfn_t fgmfn; /* foreign domain's gmfn */
        pgprot_t prot;
        domid_t domid;
        struct vm_area_struct *vma;
        int index;
        struct page **pages;
        struct xen_remap_mfn_info *info;
};

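/*
 * Per-PTE callback for apply_to_page_range(): back the next page in
 * info->pages with the foreign frame via map_foreign_page() and install
 * the corresponding PTE in the caller's address space.
 */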
static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
                        void *data)
{
        struct remap_data *info = data;
        struct page *page = info->pages[info->index++];
        unsigned long pfn = page_to_pfn(page);
        pte_t pte = pfn_pte(pfn, info->prot);

        if (map_foreign_page(pfn, info->fgmfn, info->domid))
                return -EFAULT;
        set_pte_at(info->vma->vm_mm, addr, ptep, pte);

        return 0;
}

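/*
 * Map @nr foreign frames starting at @mfn from domain @domid into the
 * given VMA at @addr. On ARM this walks the range with
 * apply_to_page_range() and rewrites the physmap one page at a time
 * (used, for example, by the privcmd driver to map foreign memory into
 * user space).
 */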
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t mfn, int nr,
                               pgprot_t prot, unsigned domid,
                               struct page **pages)
{
        int err;
        struct remap_data data;

        /* TBD: Batching, current sole caller only does page at a time */
        if (nr > 1)
                return -EINVAL;

        data.fgmfn = mfn;
        data.prot = prot;
        data.domid = domid;
        data.vma = vma;
        data.index = 0;
        data.pages = pages;
        err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
                                  remap_pte_fn, &data);
        return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);

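/*
 * Tear down mappings created above: each page's gpfn is handed back to
 * the hypervisor with XENMEM_remove_from_physmap, one page per
 * hypercall.
 */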
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
                               int nr, struct page **pages)
{
        int i;

        for (i = 0; i < nr; i++) {
                struct xen_remove_from_physmap xrp;
                unsigned long rc, pfn;

                pfn = page_to_pfn(pages[i]);

                xrp.domid = DOMID_SELF;
                xrp.gpfn = pfn;
                rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
                if (rc) {
                        pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
                                pfn, rc);
                        return rc;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

/*
 * See Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
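/*
 * Illustrative only (values are made up, not taken from this file): the
 * node parsed below typically looks something like
 *
 *      hypervisor {
 *              compatible = "xen,xen-4.2", "xen,xen";
 *              reg = <0xb0000000 0x20000>;
 *              interrupts = <1 15 0xf08>;
 *      };
 *
 * where "reg" names the region reserved for the grant table and
 * "interrupts" is the event-channel notification interrupt.
 */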
#define GRANT_TABLE_PHYSADDR 0
static int __init xen_guest_init(void)
{
        struct xen_add_to_physmap xatp;
        static struct shared_info *shared_info_page = 0;
        struct device_node *node;
        int len;
        const char *s = NULL;
        const char *version = NULL;
        const char *xen_prefix = "xen,xen-";
        struct resource res;

        node = of_find_compatible_node(NULL, NULL, "xen,xen");
        if (!node) {
                pr_debug("No Xen support\n");
                return 0;
        }
        s = of_get_property(node, "compatible", &len);
        if (strlen(xen_prefix) + 3 < len &&
                        !strncmp(xen_prefix, s, strlen(xen_prefix)))
                version = s + strlen(xen_prefix);
        if (version == NULL) {
                pr_debug("Xen version not found\n");
                return 0;
        }
        if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
                return 0;
        xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
        xen_events_irq = irq_of_parse_and_map(node, 0);
        pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
                version, xen_events_irq, xen_hvm_resume_frames);
        xen_domain_type = XEN_HVM_DOMAIN;

        xen_setup_features();
        if (xen_feature(XENFEAT_dom0))
                xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
        else
                xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

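        /*
         * Ask the hypervisor to place its shared_info page over a freshly
         * allocated page of ours: XENMAPSPACE_shared_info maps shared_info
         * at the gpfn passed in below.
         */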
        if (!shared_info_page)
                shared_info_page = (struct shared_info *)
                        get_zeroed_page(GFP_KERNEL);
        if (!shared_info_page) {
                pr_err("not enough memory\n");
                return -ENOMEM;
        }
        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
        xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;

        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();

        HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

        /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
         * page; we use it in the event channel upcall and in some pvclock
         * related functions. We don't need the vcpu_info placement
         * optimizations because we don't use any pv_mmu or pv_irq op on
         * HVM.
         * The shared info contains exactly 1 CPU (the boot CPU). The guest
         * is required to use VCPUOP_register_vcpu_info to place vcpu info
         * for secondary CPUs as they are brought up. */
        per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

        gnttab_init();
        if (!xen_initial_domain())
                xenbus_probe(NULL);

        return 0;
}
core_initcall(xen_guest_init);

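/* Event-channel upcall: Xen raises the per-CPU IRQ registered below. */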
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
        xen_hvm_evtchn_do_upcall();
        return IRQ_HANDLED;
}

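/*
 * Hook up the event-channel interrupt parsed from the device tree:
 * initialise the Xen IRQ layer, request the notification IRQ as a
 * per-CPU interrupt, then enable it on the boot CPU.
 */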
static int __init xen_init_events(void)
{
        if (!xen_domain() || xen_events_irq < 0)
                return -ENODEV;

        xen_init_IRQ();

        if (request_percpu_irq(xen_events_irq, xen_arm_callback,
                               "events", xen_vcpu)) {
                pr_err("Error requesting IRQ %d\n", xen_events_irq);
                return -EINVAL;
        }

        enable_percpu_irq(xen_events_irq, 0);

        return 0;
}
postcore_initcall(xen_init_events);

/* Hypercall stubs implemented in hypervisor.S. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(privcmd_call);