enlighten.c

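/*
 * Xen guest enlightenment for ARM: discovers Xen through the "xen,xen"
 * device tree node, maps the shared_info page, wires up the event
 * channel IRQ and provides helpers for mapping foreign domain pages
 * into this domain.
 */
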
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/mm.h>

struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

static __read_mostly int xen_events_irq = -1;

/* map fgmfn of domid to lpfn in the current domain */
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
			    unsigned int domid)
{
	int rc;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = domid,
		.size = 1,
		.space = XENMAPSPACE_gmfn_foreign,
	};
	xen_ulong_t idx = fgmfn;
	xen_pfn_t gpfn = lpfn;
	int err = 0;

	set_xen_guest_handle(xatp.idxs, &idx);
	set_xen_guest_handle(xatp.gpfns, &gpfn);
	set_xen_guest_handle(xatp.errs, &err);

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
	if (rc || err) {
		pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
			rc, err, lpfn, fgmfn);
		return 1;
	}
	return 0;
}

struct remap_data {
	xen_pfn_t fgmfn; /* foreign domain's gmfn */
	pgprot_t prot;
	domid_t domid;
	struct vm_area_struct *vma;
	int index;
	struct page **pages;
	struct xen_remap_mfn_info *info;
};

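/*
 * Called once per PTE by apply_to_page_range(): back the mapping with
 * the next local page, ask Xen to map the foreign frame at that pfn,
 * then install the PTE.
 */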
static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	unsigned long pfn = page_to_pfn(page);
	pte_t pte = pfn_pte(pfn, info->prot);

	if (map_foreign_page(pfn, info->fgmfn, info->domid))
		return -EFAULT;
	set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}

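/*
 * Map 'nr' frames of foreign domain 'domid', starting at 'mfn', into the
 * given VMA at 'addr', backed by the local pages in 'pages'.  Currently
 * limited to a single page per call (see the TBD note below).
 */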
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	int err;
	struct remap_data data;

	/* TBD: Batching, current sole caller only does page at a time */
	if (nr > 1)
		return -EINVAL;

	data.fgmfn = mfn;
	data.prot = prot;
	data.domid = domid;
	data.vma = vma;
	data.index = 0;
	data.pages = pages;
	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
				  remap_pte_fn, &data);

	return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);

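/*
 * Undo xen_remap_domain_mfn_range() by removing each backing page from
 * the physmap, so the foreign frames are no longer mapped there.
 */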
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct xen_remove_from_physmap xrp;
		unsigned long rc, pfn;

		pfn = page_to_pfn(pages[i]);

		xrp.domid = DOMID_SELF;
		xrp.gpfn = pfn;
		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
		if (rc) {
			pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
				pfn, rc);
			return rc;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

/*
 * see Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
#define GRANT_TABLE_PHYSADDR 0
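/*
 * Early guest setup: probe the device tree for the Xen node, record the
 * grant table address and event channel IRQ, map the shared_info page
 * and point the boot CPU's vcpu_info at it, then bring up the grant
 * table and xenbus.
 */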
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page = 0;
	struct device_node *node;
	int len;
	const char *s = NULL;
	const char *version = NULL;
	const char *xen_prefix = "xen,xen-";
	struct resource res;

	node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!node) {
		pr_debug("No Xen support\n");
		return 0;
	}
	s = of_get_property(node, "compatible", &len);
	if (strlen(xen_prefix) + 3 < len &&
	    !strncmp(xen_prefix, s, strlen(xen_prefix)))
		version = s + strlen(xen_prefix);
	if (version == NULL) {
		pr_debug("Xen version not found\n");
		return 0;
	}
	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
		return 0;
	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
	xen_events_irq = irq_of_parse_and_map(node, 0);
	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
		version, xen_events_irq, xen_hvm_resume_frames);
	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();
	if (xen_feature(XENFEAT_dom0))
		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
	else
		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			get_zeroed_page(GFP_KERNEL);
	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();
	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page, we use it in the event channel upcall and in some pvclock
	 * related functions. We don't need the vcpu_info placement
	 * optimizations because we don't use any pv_mmu or pv_irq op on
	 * HVM.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	gnttab_init();
	if (!xen_initial_domain())
		xenbus_probe(NULL);

	return 0;
}
core_initcall(xen_guest_init);

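/* Per-cpu event channel upcall, wired to the IRQ found in the device tree. */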
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_hvm_evtchn_do_upcall();
	return IRQ_HANDLED;
}

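/*
 * Request and enable the per-cpu event channel IRQ once we know we are
 * running as a Xen guest.
 */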
static int __init xen_init_events(void)
{
	if (!xen_domain() || xen_events_irq < 0)
		return -ENODEV;

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", xen_vcpu)) {
		pr_err("Error requesting IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	enable_percpu_irq(xen_events_irq, 0);

	return 0;
}
postcore_initcall(xen_init_events);

/* In the hypervisor.S file. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(privcmd_call);