enlighten.c

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/interface/sched.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/system_misc.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/mm.h>

struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
static struct vcpu_info __percpu *xen_vcpu_info;

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

static __read_mostly int xen_events_irq = -1;

/* map fgmfn of domid to lpfn in the current domain */
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
			    unsigned int domid)
{
	int rc;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = domid,
		.size = 1,
		.space = XENMAPSPACE_gmfn_foreign,
	};
	xen_ulong_t idx = fgmfn;
	xen_pfn_t gpfn = lpfn;
	int err = 0;

	set_xen_guest_handle(xatp.idxs, &idx);
	set_xen_guest_handle(xatp.gpfns, &gpfn);
	set_xen_guest_handle(xatp.errs, &err);

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
	if (rc || err) {
		pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
			rc, err, lpfn, fgmfn);
		return 1;
	}
	return 0;
}
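
/*
 * State handed to remap_pte_fn() via apply_to_page_range() while a
 * foreign range is being mapped into a local VMA.
 */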
struct remap_data {
	xen_pfn_t fgmfn; /* foreign domain's gmfn */
	pgprot_t prot;
	domid_t domid;
	struct vm_area_struct *vma;
	int index;
	struct page **pages;
	struct xen_remap_mfn_info *info;
};
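
/*
 * apply_to_page_range() callback: back the next page of the VMA with the
 * foreign frame and install the corresponding PTE.
 */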
static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	unsigned long pfn = page_to_pfn(page);
	pte_t pte = pfn_pte(pfn, info->prot);

	if (map_foreign_page(pfn, info->fgmfn, info->domid))
		return -EFAULT;
	set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}
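
/*
 * Map @nr foreign frames starting at @mfn from domain @domid into the
 * caller's VMA at @addr, using the pre-allocated local @pages as backing.
 * Only single-page requests are accepted for now (see the TBD below).
 *
 * A caller such as the privcmd driver would invoke it roughly like this
 * (illustrative sketch only, not a call site in this file):
 *
 *	err = xen_remap_domain_mfn_range(vma, addr, mfn, 1,
 *					 vma->vm_page_prot, domid, pages);
 */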
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	int err;
	struct remap_data data;

	/* TBD: Batching, current sole caller only does page at a time */
	if (nr > 1)
		return -EINVAL;

	data.fgmfn = mfn;
	data.prot = prot;
	data.domid = domid;
	data.vma = vma;
	data.index = 0;
	data.pages = pages;
	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
				  remap_pte_fn, &data);
	return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
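
/*
 * Undo xen_remap_domain_mfn_range(): remove the foreign mappings for the
 * first @nr entries of @pages from this domain's physmap.
 */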
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct xen_remove_from_physmap xrp;
		unsigned long rc, pfn;

		pfn = page_to_pfn(pages[i]);

		xrp.domid = DOMID_SELF;
		xrp.gpfn = pfn;
		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
		if (rc) {
			pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
				pfn, rc);
			return rc;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
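
/*
 * Per-CPU setup: register this CPU's vcpu_info with the hypervisor and
 * enable the event-channel upcall IRQ on it.
 */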
static void __init xen_percpu_init(void *unused)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup;
	int err;
	int cpu = get_cpu();

	pr_info("Xen: initializing cpu%d\n", cpu);
	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
	info.offset = offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	BUG_ON(err);
	per_cpu(xen_vcpu, cpu) = vcpup;

	enable_percpu_irq(xen_events_irq, 0);
	put_cpu();
}
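
/* Restart and power-off hooks: ask Xen to shut this domain down. */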
static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
	int rc;

	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	if (rc)
		BUG();
}

static void xen_power_off(void)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
	int rc;

	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	if (rc)
		BUG();
}

/*
 * See Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
#define GRANT_TABLE_PHYSADDR 0
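
/*
 * Probe the "xen,xen" device tree node, map the shared info page and set up
 * the per-domain state needed before the Xen frontend drivers initialize.
 */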
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page = 0;
	struct device_node *node;
	int len;
	const char *s = NULL;
	const char *version = NULL;
	const char *xen_prefix = "xen,xen-";
	struct resource res;

	node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!node) {
		pr_debug("No Xen support\n");
		return 0;
	}
	s = of_get_property(node, "compatible", &len);
	if (strlen(xen_prefix) + 3 < len &&
			!strncmp(xen_prefix, s, strlen(xen_prefix)))
		version = s + strlen(xen_prefix);
	if (version == NULL) {
		pr_debug("Xen version not found\n");
		return 0;
	}
	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
		return 0;
	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
	xen_events_irq = irq_of_parse_and_map(node, 0);
	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
			version, xen_events_irq, xen_hvm_resume_frames);
	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();
	if (xen_feature(XENFEAT_dom0))
		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
	else
		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			get_zeroed_page(GFP_KERNEL);
	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();
	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; we use it in the event channel upcall and in some pvclock
	 * related functions.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
				       sizeof(struct vcpu_info));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;

	gnttab_init();
	if (!xen_initial_domain())
		xenbus_probe(NULL);

	/*
	 * Make sure board specific code will not set up ops for
	 * cpu idle and cpu freq.
	 */
	disable_cpuidle();
	disable_cpufreq();

	return 0;
}
core_initcall(xen_guest_init);
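
/* Route power-off and restart through the hypervisor when running on Xen. */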
static int __init xen_pm_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	pm_power_off = xen_power_off;
	arm_pm_restart = xen_restart;

	return 0;
}
late_initcall(xen_pm_init);
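
/* Event-channel upcall: Xen raises this per-CPU interrupt to deliver events. */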
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_hvm_evtchn_do_upcall();
	return IRQ_HANDLED;
}
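
/*
 * Register the event-channel IRQ (parsed from the device tree in
 * xen_guest_init()) and run the per-CPU initialization on every CPU.
 */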
static int __init xen_init_events(void)
{
	if (!xen_domain() || xen_events_irq < 0)
		return -ENODEV;

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", &xen_vcpu)) {
		pr_err("Error requesting IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	on_each_cpu(xen_percpu_init, NULL, 0);

	return 0;
}
postcore_initcall(xen_init_events);

/* Hypercall stubs implemented in hypervisor.S. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
EXPORT_SYMBOL_GPL(privcmd_call);