/* enlighten.c */

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/interface/sched.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/system_misc.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/mm.h>

struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
static struct vcpu_info __percpu *xen_vcpu_info;

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

static __read_mostly int xen_events_irq = -1;

/* map fgmfn of domid to lpfn in the current domain */
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
                            unsigned int domid)
{
        int rc;
        struct xen_add_to_physmap_range xatp = {
                .domid = DOMID_SELF,
                .foreign_domid = domid,
                .size = 1,
                .space = XENMAPSPACE_gmfn_foreign,
        };
        xen_ulong_t idx = fgmfn;
        xen_pfn_t gpfn = lpfn;
        int err = 0;

        set_xen_guest_handle(xatp.idxs, &idx);
        set_xen_guest_handle(xatp.gpfns, &gpfn);
        set_xen_guest_handle(xatp.errs, &err);

        rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
        if (rc || err) {
                pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
                        rc, err, lpfn, fgmfn);
                return 1;
        }
        return 0;
}

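/*
 * Per-invocation state for remap_pte_fn(): the foreign frame to map, the
 * protection bits to use, the target VMA and the backing pages supplied
 * by the caller.
 */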
struct remap_data {
        xen_pfn_t fgmfn; /* foreign domain's gmfn */
        pgprot_t prot;
        domid_t domid;
        struct vm_area_struct *vma;
        int index;
        struct page **pages;
        struct xen_remap_mfn_info *info;
};

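/*
 * Callback for apply_to_page_range(): back the next page of the VMA with
 * the foreign frame (via map_foreign_page()) and install the matching PTE.
 */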
static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
                        void *data)
{
        struct remap_data *info = data;
        struct page *page = info->pages[info->index++];
        unsigned long pfn = page_to_pfn(page);
        pte_t pte = pfn_pte(pfn, info->prot);

        if (map_foreign_page(pfn, info->fgmfn, info->domid))
                return -EFAULT;
        set_pte_at(info->vma->vm_mm, addr, ptep, pte);

        return 0;
}

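/*
 * Map @nr foreign frames starting at @mfn from domain @domid into the given
 * VMA at @addr. On ARM this walks the range with apply_to_page_range();
 * the privcmd driver is the expected caller.
 */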
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t mfn, int nr,
                               pgprot_t prot, unsigned domid,
                               struct page **pages)
{
        int err;
        struct remap_data data;

        /* TBD: batching; the current sole caller only maps one page at a time */
        if (nr > 1)
                return -EINVAL;

        data.fgmfn = mfn;
        data.prot = prot;
        data.domid = domid;
        data.vma = vma;
        data.index = 0;
        data.pages = pages;
        err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
                                  remap_pte_fn, &data);
        return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);

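/*
 * Undo xen_remap_domain_mfn_range(): remove each previously mapped page
 * from the guest physmap with XENMEM_remove_from_physmap.
 */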
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
                               int nr, struct page **pages)
{
        int i;

        for (i = 0; i < nr; i++) {
                struct xen_remove_from_physmap xrp;
                unsigned long rc, pfn;

                pfn = page_to_pfn(pages[i]);

                xrp.domid = DOMID_SELF;
                xrp.gpfn = pfn;
                rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
                if (rc) {
                        pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
                                pfn, rc);
                        return rc;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

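/*
 * Runs on each CPU: register this CPU's vcpu_info area with the hypervisor
 * (VCPUOP_register_vcpu_info) and enable the per-cpu event-channel IRQ.
 */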
static void __init xen_percpu_init(void *unused)
{
        struct vcpu_register_vcpu_info info;
        struct vcpu_info *vcpup;
        int err;
        int cpu = get_cpu();

        pr_info("Xen: initializing cpu%d\n", cpu);
        vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

        info.mfn = __pa(vcpup) >> PAGE_SHIFT;
        info.offset = offset_in_page(vcpup);

        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
        BUG_ON(err);
        per_cpu(xen_vcpu, cpu) = vcpup;

        enable_percpu_irq(xen_events_irq, 0);
}

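/*
 * Restart and power-off are both requests to the hypervisor, issued through
 * the SCHEDOP_shutdown hypercall with the appropriate reason.
 */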
static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
{
        struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
        int rc;

        rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
        if (rc)
                BUG();
}

static void xen_power_off(void)
{
        struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
        int rc;

        rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
        if (rc)
                BUG();
}

/*
 * See Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
#define GRANT_TABLE_PHYSADDR 0
static int __init xen_guest_init(void)
{
        struct xen_add_to_physmap xatp;
        static struct shared_info *shared_info_page = 0;
        struct device_node *node;
        int len;
        const char *s = NULL;
        const char *version = NULL;
        const char *xen_prefix = "xen,xen-";
        struct resource res;

        node = of_find_compatible_node(NULL, NULL, "xen,xen");
        if (!node) {
                pr_debug("No Xen support\n");
                return 0;
        }
        s = of_get_property(node, "compatible", &len);
        if (strlen(xen_prefix) + 3 < len &&
            !strncmp(xen_prefix, s, strlen(xen_prefix)))
                version = s + strlen(xen_prefix);
        if (version == NULL) {
                pr_debug("Xen version not found\n");
                return 0;
        }
        if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
                return 0;
        xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
        xen_events_irq = irq_of_parse_and_map(node, 0);
        pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
                version, xen_events_irq, xen_hvm_resume_frames);
        xen_domain_type = XEN_HVM_DOMAIN;

        xen_setup_features();
        if (xen_feature(XENFEAT_dom0))
                xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
        else
                xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

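        /*
         * Map the hypervisor's shared_info page on top of a freshly
         * allocated guest page, using XENMEM_add_to_physmap with
         * XENMAPSPACE_shared_info.
         */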
        if (!shared_info_page)
                shared_info_page = (struct shared_info *)
                        get_zeroed_page(GFP_KERNEL);
        if (!shared_info_page) {
                pr_err("not enough memory\n");
                return -ENOMEM;
        }
        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
        xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();

        HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

        /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
         * page; we use it in the event channel upcall and in some pvclock
         * related functions.
         * The shared info contains exactly 1 CPU (the boot CPU). The guest
         * is required to use VCPUOP_register_vcpu_info to place vcpu info
         * for secondary CPUs as they are brought up.
         * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
         */
        xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
                                       sizeof(struct vcpu_info));
        if (xen_vcpu_info == NULL)
                return -ENOMEM;

        gnttab_init();
        if (!xen_initial_domain())
                xenbus_probe(NULL);

        return 0;
}
core_initcall(xen_guest_init);

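/*
 * Route the machine power-off and restart hooks to the Xen shutdown
 * helpers above.
 */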
static int __init xen_pm_init(void)
{
        pm_power_off = xen_power_off;
        arm_pm_restart = xen_restart;

        return 0;
}
subsys_initcall(xen_pm_init);

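/*
 * Per-cpu interrupt handler for the Xen event-channel IRQ: hand off to the
 * generic event-channel upcall.
 */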
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
        xen_hvm_evtchn_do_upcall();
        return IRQ_HANDLED;
}

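/*
 * Set up event-channel delivery: initialise the Xen IRQ layer, request the
 * per-cpu event IRQ discovered from the device tree, then have every online
 * CPU register its vcpu_info via xen_percpu_init().
 */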
static int __init xen_init_events(void)
{
        if (!xen_domain() || xen_events_irq < 0)
                return -ENODEV;

        xen_init_IRQ();

        if (request_percpu_irq(xen_events_irq, xen_arm_callback,
                               "events", &xen_vcpu)) {
                pr_err("Error requesting IRQ %d\n", xen_events_irq);
                return -EINVAL;
        }

        on_each_cpu(xen_percpu_init, NULL, 0);

        return 0;
}
postcore_initcall(xen_init_events);

/* In the hypervisor.S file. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
EXPORT_SYMBOL_GPL(privcmd_call);