xen_pv_ops.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158
/******************************************************************************
 * arch/ia64/xen/xen_pv_ops.c
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
  22. #include <linux/console.h>
  23. #include <linux/irq.h>
  24. #include <linux/kernel.h>
  25. #include <linux/pm.h>
  26. #include <linux/unistd.h>
  27. #include <asm/xen/hypervisor.h>
  28. #include <asm/xen/xencomm.h>
  29. #include <asm/xen/privop.h>
  30. #include "irq_xen.h"
  31. #include "time.h"
  32. /***************************************************************************
  33. * general info
  34. */
  35. static struct pv_info xen_info __initdata = {
  36. .kernel_rpl = 2, /* or 1: determin at runtime */
  37. .paravirt_enabled = 1,
  38. .name = "Xen/ia64",
  39. };
  40. #define IA64_RSC_PL_SHIFT 2
  41. #define IA64_RSC_PL_BIT_SIZE 2
  42. #define IA64_RSC_PL_MASK \
  43. (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)
  44. static void __init
  45. xen_info_init(void)
  46. {
  47. /* Xenified Linux/ia64 may run on pl = 1 or 2.
  48. * determin at run time. */
  49. unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
  50. unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;
  51. xen_info.kernel_rpl = rpl;
  52. }
  53. /***************************************************************************
  54. * pv_init_ops
  55. * initialization hooks.
  56. */
  57. static void
  58. xen_panic_hypercall(struct unw_frame_info *info, void *arg)
  59. {
  60. current->thread.ksp = (__u64)info->sw - 16;
  61. HYPERVISOR_shutdown(SHUTDOWN_crash);
  62. /* we're never actually going to get here... */
  63. }
  64. static int
  65. xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
  66. {
  67. unw_init_running(xen_panic_hypercall, NULL);
  68. /* we're never actually going to get here... */
  69. return NOTIFY_DONE;
  70. }
  71. static struct notifier_block xen_panic_block = {
  72. xen_panic_event, NULL, 0 /* try to go last */
  73. };
  74. static void xen_pm_power_off(void)
  75. {
  76. local_irq_disable();
  77. HYPERVISOR_shutdown(SHUTDOWN_poweroff);
  78. }
  79. static void __init
  80. xen_banner(void)
  81. {
  82. printk(KERN_INFO
  83. "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
  84. "flags=0x%x\n",
  85. xen_info.kernel_rpl,
  86. HYPERVISOR_shared_info->arch.start_info_pfn,
  87. xen_start_info->nr_pages, xen_start_info->flags);
  88. }
  89. static int __init
  90. xen_reserve_memory(struct rsvd_region *region)
  91. {
  92. region->start = (unsigned long)__va(
  93. (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
  94. region->end = region->start + PAGE_SIZE;
  95. return 1;
  96. }
  97. static void __init
  98. xen_arch_setup_early(void)
  99. {
  100. struct shared_info *s;
  101. BUG_ON(!xen_pv_domain());
  102. s = HYPERVISOR_shared_info;
  103. xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
  104. /* Must be done before any hypercall. */
  105. xencomm_initialize();
  106. xen_setup_features();
  107. /* Register a call for panic conditions. */
  108. atomic_notifier_chain_register(&panic_notifier_list,
  109. &xen_panic_block);
  110. pm_power_off = xen_pm_power_off;
  111. xen_ia64_enable_opt_feature();
  112. }
  113. static void __init
  114. xen_arch_setup_console(char **cmdline_p)
  115. {
  116. add_preferred_console("xenboot", 0, NULL);
  117. add_preferred_console("tty", 0, NULL);
  118. /* use hvc_xen */
  119. add_preferred_console("hvc", 0, NULL);
  120. #if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
  121. conswitchp = NULL;
  122. #endif
  123. }
  124. static int __init
  125. xen_arch_setup_nomca(void)
  126. {
  127. return 1;
  128. }
  129. static void __init
  130. xen_post_smp_prepare_boot_cpu(void)
  131. {
  132. xen_setup_vcpu_info_placement();
  133. }
  134. #ifdef ASM_SUPPORTED
  135. static unsigned long __init_or_module
  136. xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
  137. #endif
  138. static void __init
  139. xen_patch_branch(unsigned long tag, unsigned long type);
  140. static const struct pv_init_ops xen_init_ops __initconst = {
  141. .banner = xen_banner,
  142. .reserve_memory = xen_reserve_memory,
  143. .arch_setup_early = xen_arch_setup_early,
  144. .arch_setup_console = xen_arch_setup_console,
  145. .arch_setup_nomca = xen_arch_setup_nomca,
  146. .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
  147. #ifdef ASM_SUPPORTED
  148. .patch_bundle = xen_patch_bundle,
  149. #endif
  150. .patch_branch = xen_patch_branch,
  151. };
  152. /***************************************************************************
  153. * pv_fsys_data
  154. * addresses for fsys
  155. */
  156. extern unsigned long xen_fsyscall_table[NR_syscalls];
  157. extern char xen_fsys_bubble_down[];
  158. struct pv_fsys_data xen_fsys_data __initdata = {
  159. .fsyscall_table = (unsigned long *)xen_fsyscall_table,
  160. .fsys_bubble_down = (void *)xen_fsys_bubble_down,
  161. };
  162. /***************************************************************************
  163. * pv_patchdata
  164. * patchdata addresses
  165. */
  166. #define DECLARE(name) \
  167. extern unsigned long __xen_start_gate_##name##_patchlist[]; \
  168. extern unsigned long __xen_end_gate_##name##_patchlist[]
  169. DECLARE(fsyscall);
  170. DECLARE(brl_fsys_bubble_down);
  171. DECLARE(vtop);
  172. DECLARE(mckinley_e9);
  173. extern unsigned long __xen_start_gate_section[];
  174. #define ASSIGN(name) \
  175. .start_##name##_patchlist = \
  176. (unsigned long)__xen_start_gate_##name##_patchlist, \
  177. .end_##name##_patchlist = \
  178. (unsigned long)__xen_end_gate_##name##_patchlist
  179. static struct pv_patchdata xen_patchdata __initdata = {
  180. ASSIGN(fsyscall),
  181. ASSIGN(brl_fsys_bubble_down),
  182. ASSIGN(vtop),
  183. ASSIGN(mckinley_e9),
  184. .gate_section = (void*)__xen_start_gate_section,
  185. };
  186. /***************************************************************************
  187. * pv_cpu_ops
  188. * intrinsics hooks.
  189. */
  190. #ifndef ASM_SUPPORTED
  191. static void
  192. xen_set_itm_with_offset(unsigned long val)
  193. {
  194. /* ia64_cpu_local_tick() calls this with interrupt enabled. */
  195. /* WARN_ON(!irqs_disabled()); */
  196. xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
  197. }
  198. static unsigned long
  199. xen_get_itm_with_offset(void)
  200. {
  201. /* unused at this moment */
  202. printk(KERN_DEBUG "%s is called.\n", __func__);
  203. WARN_ON(!irqs_disabled());
  204. return ia64_native_getreg(_IA64_REG_CR_ITM) +
  205. XEN_MAPPEDREGS->itc_offset;
  206. }
  207. /* ia64_set_itc() is only called by
  208. * cpu_init() with ia64_set_itc(0) and ia64_sync_itc().
  209. * So XEN_MAPPEDRESG->itc_offset cal be considered as almost constant.
  210. */
  211. static void
  212. xen_set_itc(unsigned long val)
  213. {
  214. unsigned long mitc;
  215. WARN_ON(!irqs_disabled());
  216. mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
  217. XEN_MAPPEDREGS->itc_offset = val - mitc;
  218. XEN_MAPPEDREGS->itc_last = val;
  219. }
  220. static unsigned long
  221. xen_get_itc(void)
  222. {
  223. unsigned long res;
  224. unsigned long itc_offset;
  225. unsigned long itc_last;
  226. unsigned long ret_itc_last;
  227. itc_offset = XEN_MAPPEDREGS->itc_offset;
  228. do {
  229. itc_last = XEN_MAPPEDREGS->itc_last;
  230. res = ia64_native_getreg(_IA64_REG_AR_ITC);
  231. res += itc_offset;
  232. if (itc_last >= res)
  233. res = itc_last + 1;
  234. ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
  235. itc_last, res);
  236. } while (unlikely(ret_itc_last != itc_last));
  237. return res;
  238. #if 0
  239. /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled.
  240. Should it be paravirtualized instead? */
  241. WARN_ON(!irqs_disabled());
  242. itc_offset = XEN_MAPPEDREGS->itc_offset;
  243. itc_last = XEN_MAPPEDREGS->itc_last;
  244. res = ia64_native_getreg(_IA64_REG_AR_ITC);
  245. res += itc_offset;
  246. if (itc_last >= res)
  247. res = itc_last + 1;
  248. XEN_MAPPEDREGS->itc_last = res;
  249. return res;
  250. #endif
  251. }
  252. static void xen_setreg(int regnum, unsigned long val)
  253. {
  254. switch (regnum) {
  255. case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
  256. xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
  257. break;
  258. #ifdef CONFIG_IA32_SUPPORT
  259. case _IA64_REG_AR_EFLAG:
  260. xen_set_eflag(val);
  261. break;
  262. #endif
  263. case _IA64_REG_AR_ITC:
  264. xen_set_itc(val);
  265. break;
  266. case _IA64_REG_CR_TPR:
  267. xen_set_tpr(val);
  268. break;
  269. case _IA64_REG_CR_ITM:
  270. xen_set_itm_with_offset(val);
  271. break;
  272. case _IA64_REG_CR_EOI:
  273. xen_eoi(val);
  274. break;
  275. default:
  276. ia64_native_setreg_func(regnum, val);
  277. break;
  278. }
  279. }
  280. static unsigned long xen_getreg(int regnum)
  281. {
  282. unsigned long res;
  283. switch (regnum) {
  284. case _IA64_REG_PSR:
  285. res = xen_get_psr();
  286. break;
  287. #ifdef CONFIG_IA32_SUPPORT
  288. case _IA64_REG_AR_EFLAG:
  289. res = xen_get_eflag();
  290. break;
  291. #endif
  292. case _IA64_REG_AR_ITC:
  293. res = xen_get_itc();
  294. break;
  295. case _IA64_REG_CR_ITM:
  296. res = xen_get_itm_with_offset();
  297. break;
  298. case _IA64_REG_CR_IVR:
  299. res = xen_get_ivr();
  300. break;
  301. case _IA64_REG_CR_TPR:
  302. res = xen_get_tpr();
  303. break;
  304. default:
  305. res = ia64_native_getreg_func(regnum);
  306. break;
  307. }
  308. return res;
  309. }
  310. /* turning on interrupts is a bit more complicated.. write to the
  311. * memory-mapped virtual psr.i bit first (to avoid race condition),
  312. * then if any interrupts were pending, we have to execute a hyperprivop
  313. * to ensure the pending interrupt gets delivered; else we're done! */
  314. static void
  315. xen_ssm_i(void)
  316. {
  317. int old = xen_get_virtual_psr_i();
  318. xen_set_virtual_psr_i(1);
  319. barrier();
  320. if (!old && xen_get_virtual_pend())
  321. xen_hyper_ssm_i();
  322. }
  323. /* turning off interrupts can be paravirtualized simply by writing
  324. * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
  325. static void
  326. xen_rsm_i(void)
  327. {
  328. xen_set_virtual_psr_i(0);
  329. barrier();
  330. }
  331. static unsigned long
  332. xen_get_psr_i(void)
  333. {
  334. return xen_get_virtual_psr_i() ? IA64_PSR_I : 0;
  335. }
  336. static void
  337. xen_intrin_local_irq_restore(unsigned long mask)
  338. {
  339. if (mask & IA64_PSR_I)
  340. xen_ssm_i();
  341. else
  342. xen_rsm_i();
  343. }
  344. #else
  345. #define __DEFINE_FUNC(name, code) \
  346. extern const char xen_ ## name ## _direct_start[]; \
  347. extern const char xen_ ## name ## _direct_end[]; \
  348. asm (".align 32\n" \
  349. ".proc xen_" #name "\n" \
  350. "xen_" #name ":\n" \
  351. "xen_" #name "_direct_start:\n" \
  352. code \
  353. "xen_" #name "_direct_end:\n" \
  354. "br.cond.sptk.many b6\n" \
  355. ".endp xen_" #name "\n")
  356. #define DEFINE_VOID_FUNC0(name, code) \
  357. extern void \
  358. xen_ ## name (void); \
  359. __DEFINE_FUNC(name, code)
  360. #define DEFINE_VOID_FUNC1(name, code) \
  361. extern void \
  362. xen_ ## name (unsigned long arg); \
  363. __DEFINE_FUNC(name, code)
  364. #define DEFINE_VOID_FUNC1_VOID(name, code) \
  365. extern void \
  366. xen_ ## name (void *arg); \
  367. __DEFINE_FUNC(name, code)
  368. #define DEFINE_VOID_FUNC2(name, code) \
  369. extern void \
  370. xen_ ## name (unsigned long arg0, \
  371. unsigned long arg1); \
  372. __DEFINE_FUNC(name, code)
  373. #define DEFINE_FUNC0(name, code) \
  374. extern unsigned long \
  375. xen_ ## name (void); \
  376. __DEFINE_FUNC(name, code)
  377. #define DEFINE_FUNC1(name, type, code) \
  378. extern unsigned long \
  379. xen_ ## name (type arg); \
  380. __DEFINE_FUNC(name, code)
  381. #define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
  382. /*
  383. * static void xen_set_itm_with_offset(unsigned long val)
  384. * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
  385. */
  386. /* 2 bundles */
  387. DEFINE_VOID_FUNC1(set_itm_with_offset,
  388. "mov r2 = " __stringify(XSI_BASE) " + "
  389. __stringify(XSI_ITC_OFFSET_OFS) "\n"
  390. ";;\n"
  391. "ld8 r3 = [r2]\n"
  392. ";;\n"
  393. "sub r8 = r8, r3\n"
  394. "break " __stringify(HYPERPRIVOP_SET_ITM) "\n");
  395. /*
  396. * static unsigned long xen_get_itm_with_offset(void)
  397. * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset;
  398. */
  399. /* 2 bundles */
  400. DEFINE_FUNC0(get_itm_with_offset,
  401. "mov r2 = " __stringify(XSI_BASE) " + "
  402. __stringify(XSI_ITC_OFFSET_OFS) "\n"
  403. ";;\n"
  404. "ld8 r3 = [r2]\n"
  405. "mov r8 = cr.itm\n"
  406. ";;\n"
  407. "add r8 = r8, r2\n");
  408. /*
  409. * static void xen_set_itc(unsigned long val)
  410. * unsigned long mitc;
  411. *
  412. * WARN_ON(!irqs_disabled());
  413. * mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
  414. * XEN_MAPPEDREGS->itc_offset = val - mitc;
  415. * XEN_MAPPEDREGS->itc_last = val;
  416. */
  417. /* 2 bundles */
  418. DEFINE_VOID_FUNC1(set_itc,
  419. "mov r2 = " __stringify(XSI_BASE) " + "
  420. __stringify(XSI_ITC_LAST_OFS) "\n"
  421. "mov r3 = ar.itc\n"
  422. ";;\n"
  423. "sub r3 = r8, r3\n"
  424. "st8 [r2] = r8, "
  425. __stringify(XSI_ITC_LAST_OFS) " - "
  426. __stringify(XSI_ITC_OFFSET_OFS) "\n"
  427. ";;\n"
  428. "st8 [r2] = r3\n");
  429. /*
  430. * static unsigned long xen_get_itc(void)
  431. * unsigned long res;
  432. * unsigned long itc_offset;
  433. * unsigned long itc_last;
  434. * unsigned long ret_itc_last;
  435. *
  436. * itc_offset = XEN_MAPPEDREGS->itc_offset;
  437. * do {
  438. * itc_last = XEN_MAPPEDREGS->itc_last;
  439. * res = ia64_native_getreg(_IA64_REG_AR_ITC);
  440. * res += itc_offset;
  441. * if (itc_last >= res)
  442. * res = itc_last + 1;
  443. * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
  444. * itc_last, res);
  445. * } while (unlikely(ret_itc_last != itc_last));
  446. * return res;
  447. */
  448. /* 5 bundles */
  449. DEFINE_FUNC0(get_itc,
  450. "mov r2 = " __stringify(XSI_BASE) " + "
  451. __stringify(XSI_ITC_OFFSET_OFS) "\n"
  452. ";;\n"
  453. "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - "
  454. __stringify(XSI_ITC_OFFSET_OFS) "\n"
  455. /* r9 = itc_offset */
  456. /* r2 = XSI_ITC_OFFSET */
  457. "888:\n"
  458. "mov r8 = ar.itc\n" /* res = ar.itc */
  459. ";;\n"
  460. "ld8 r3 = [r2]\n" /* r3 = itc_last */
  461. "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */
  462. ";;\n"
  463. "cmp.gtu p6, p0 = r3, r8\n"
  464. ";;\n"
  465. "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */
  466. ";;\n"
  467. "mov ar.ccv = r8\n"
  468. ";;\n"
  469. "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n"
  470. ";;\n"
  471. "cmp.ne p6, p0 = r10, r3\n"
  472. "(p6) hint @pause\n"
  473. "(p6) br.cond.spnt 888b\n");
  474. DEFINE_VOID_FUNC1_VOID(fc,
  475. "break " __stringify(HYPERPRIVOP_FC) "\n");
  476. /*
  477. * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR
  478. * masked_addr = *psr_i_addr_addr
  479. * pending_intr_addr = masked_addr - 1
  480. * if (val & IA64_PSR_I) {
  481. * masked = *masked_addr
  482. * *masked_addr = 0:xen_set_virtual_psr_i(1)
  483. * compiler barrier
  484. * if (masked) {
  485. * uint8_t pending = *pending_intr_addr;
  486. * if (pending)
  487. * XEN_HYPER_SSM_I
  488. * }
  489. * } else {
  490. * *masked_addr = 1:xen_set_virtual_psr_i(0)
  491. * }
  492. */
  493. /* 6 bundles */
  494. DEFINE_VOID_FUNC1(intrin_local_irq_restore,
  495. /* r8 = input value: 0 or IA64_PSR_I
  496. * p6 = (flags & IA64_PSR_I)
  497. * = if clause
  498. * p7 = !(flags & IA64_PSR_I)
  499. * = else clause
  500. */
  501. "cmp.ne p6, p7 = r8, r0\n"
  502. "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
  503. ";;\n"
  504. /* r9 = XEN_PSR_I_ADDR */
  505. "ld8 r9 = [r9]\n"
  506. ";;\n"
  507. /* r10 = masked previous value */
  508. "(p6) ld1.acq r10 = [r9]\n"
  509. ";;\n"
  510. /* p8 = !masked interrupt masked previously? */
  511. "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
  512. /* p7 = else clause */
  513. "(p7) mov r11 = 1\n"
  514. ";;\n"
  515. /* masked = 1 */
  516. "(p7) st1.rel [r9] = r11\n"
  517. /* p6 = if clause */
  518. /* masked = 0
  519. * r9 = masked_addr - 1
  520. * = pending_intr_addr
  521. */
  522. "(p8) st1.rel [r9] = r0, -1\n"
  523. ";;\n"
  524. /* r8 = pending_intr */
  525. "(p8) ld1.acq r11 = [r9]\n"
  526. ";;\n"
  527. /* p9 = interrupt pending? */
  528. "(p8) cmp.ne.unc p9, p10 = r11, r0\n"
  529. ";;\n"
  530. "(p10) mf\n"
  531. /* issue hypercall to trigger interrupt */
  532. "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n");
  533. DEFINE_VOID_FUNC2(ptcga,
  534. "break " __stringify(HYPERPRIVOP_PTC_GA) "\n");
  535. DEFINE_VOID_FUNC2(set_rr,
  536. "break " __stringify(HYPERPRIVOP_SET_RR) "\n");
  537. /*
  538. * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR;
  539. * tmp = *tmp
  540. * tmp = *tmp;
  541. * psr_i = tmp? 0: IA64_PSR_I;
  542. */
  543. /* 4 bundles */
  544. DEFINE_FUNC0(get_psr_i,
  545. "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
  546. ";;\n"
  547. "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */
  548. "mov r8 = 0\n" /* psr_i = 0 */
  549. ";;\n"
  550. "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */
  551. ";;\n"
  552. "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */
  553. ";;\n"
  554. "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n");
  555. DEFINE_FUNC1(thash, unsigned long,
  556. "break " __stringify(HYPERPRIVOP_THASH) "\n");
  557. DEFINE_FUNC1(get_cpuid, int,
  558. "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n");
  559. DEFINE_FUNC1(get_pmd, int,
  560. "break " __stringify(HYPERPRIVOP_GET_PMD) "\n");
  561. DEFINE_FUNC1(get_rr, unsigned long,
  562. "break " __stringify(HYPERPRIVOP_GET_RR) "\n");
  563. /*
  564. * void xen_privop_ssm_i(void)
  565. *
  566. * int masked = !xen_get_virtual_psr_i();
  567. * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr)
  568. * xen_set_virtual_psr_i(1)
  569. * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0
  570. * // compiler barrier
  571. * if (masked) {
  572. * uint8_t* pend_int_addr =
  573. * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1;
  574. * uint8_t pending = *pend_int_addr;
  575. * if (pending)
  576. * XEN_HYPER_SSM_I
  577. * }
  578. */
  579. /* 4 bundles */
  580. DEFINE_VOID_FUNC0(ssm_i,
  581. "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
  582. ";;\n"
  583. "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */
  584. ";;\n"
  585. "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */
  586. ";;\n"
  587. "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt
  588. * r8 = XEN_PSR_I_ADDR - 1
  589. * = pend_int_addr
  590. */
  591. "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I
  592. * previously interrupt
  593. * masked?
  594. */
  595. ";;\n"
  596. "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */
  597. ";;\n"
  598. "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/
  599. ";;\n"
  600. /* issue hypercall to get interrupt */
  601. "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
  602. ";;\n");
  603. /*
  604. * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr
  605. * = XEN_PSR_I_ADDR_ADDR;
  606. * psr_i_addr = *psr_i_addr_addr;
  607. * *psr_i_addr = 1;
  608. */
  609. /* 2 bundles */
  610. DEFINE_VOID_FUNC0(rsm_i,
  611. "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
  612. /* r8 = XEN_PSR_I_ADDR */
  613. "mov r9 = 1\n"
  614. ";;\n"
  615. "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */
  616. ";;\n"
  617. "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */
  618. extern void
  619. xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
  620. unsigned long val2, unsigned long val3,
  621. unsigned long val4);
  622. __DEFINE_FUNC(set_rr0_to_rr4,
  623. "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n");
  624. extern unsigned long xen_getreg(int regnum);
  625. #define __DEFINE_GET_REG(id, privop) \
  626. "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
  627. ";;\n" \
  628. "cmp.eq p6, p0 = r2, r8\n" \
  629. ";;\n" \
  630. "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \
  631. "(p6) br.cond.sptk.many b6\n" \
  632. ";;\n"
  633. __DEFINE_FUNC(getreg,
  634. __DEFINE_GET_REG(PSR, PSR)
  635. #ifdef CONFIG_IA32_SUPPORT
  636. __DEFINE_GET_REG(AR_EFLAG, EFLAG)
  637. #endif
  638. /* get_itc */
  639. "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
  640. ";;\n"
  641. "cmp.eq p6, p0 = r2, r8\n"
  642. ";;\n"
  643. "(p6) br.cond.spnt xen_get_itc\n"
  644. ";;\n"
  645. /* get itm */
  646. "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
  647. ";;\n"
  648. "cmp.eq p6, p0 = r2, r8\n"
  649. ";;\n"
  650. "(p6) br.cond.spnt xen_get_itm_with_offset\n"
  651. ";;\n"
  652. __DEFINE_GET_REG(CR_IVR, IVR)
  653. __DEFINE_GET_REG(CR_TPR, TPR)
  654. /* fall back */
  655. "movl r2 = ia64_native_getreg_func\n"
  656. ";;\n"
  657. "mov b7 = r2\n"
  658. ";;\n"
  659. "br.cond.sptk.many b7\n");
  660. extern void xen_setreg(int regnum, unsigned long val);
  661. #define __DEFINE_SET_REG(id, privop) \
  662. "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
  663. ";;\n" \
  664. "cmp.eq p6, p0 = r2, r9\n" \
  665. ";;\n" \
  666. "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \
  667. "(p6) br.cond.sptk.many b6\n" \
  668. ";;\n"
  669. __DEFINE_FUNC(setreg,
  670. /* kr0 .. kr 7*/
  671. /*
  672. * if (_IA64_REG_AR_KR0 <= regnum &&
  673. * regnum <= _IA64_REG_AR_KR7) {
  674. * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0
  675. * register __val asm ("r9") = val
  676. * "break HYPERPRIVOP_SET_KR"
  677. * }
  678. */
  679. "mov r17 = r9\n"
  680. "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n"
  681. ";;\n"
  682. "cmp.ge p6, p0 = r9, r2\n"
  683. "sub r17 = r17, r2\n"
  684. ";;\n"
  685. "(p6) cmp.ge.unc p7, p0 = "
  686. __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0)
  687. ", r17\n"
  688. ";;\n"
  689. "(p7) mov r9 = r8\n"
  690. ";;\n"
  691. "(p7) mov r8 = r17\n"
  692. "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n"
  693. /* set itm */
  694. "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
  695. ";;\n"
  696. "cmp.eq p6, p0 = r2, r8\n"
  697. ";;\n"
  698. "(p6) br.cond.spnt xen_set_itm_with_offset\n"
  699. /* set itc */
  700. "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
  701. ";;\n"
  702. "cmp.eq p6, p0 = r2, r8\n"
  703. ";;\n"
  704. "(p6) br.cond.spnt xen_set_itc\n"
  705. #ifdef CONFIG_IA32_SUPPORT
  706. __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG)
  707. #endif
  708. __DEFINE_SET_REG(CR_TPR, SET_TPR)
  709. __DEFINE_SET_REG(CR_EOI, EOI)
  710. /* fall back */
  711. "movl r2 = ia64_native_setreg_func\n"
  712. ";;\n"
  713. "mov b7 = r2\n"
  714. ";;\n"
  715. "br.cond.sptk.many b7\n");
  716. #endif
  717. static const struct pv_cpu_ops xen_cpu_ops __initconst = {
  718. .fc = xen_fc,
  719. .thash = xen_thash,
  720. .get_cpuid = xen_get_cpuid,
  721. .get_pmd = xen_get_pmd,
  722. .getreg = xen_getreg,
  723. .setreg = xen_setreg,
  724. .ptcga = xen_ptcga,
  725. .get_rr = xen_get_rr,
  726. .set_rr = xen_set_rr,
  727. .set_rr0_to_rr4 = xen_set_rr0_to_rr4,
  728. .ssm_i = xen_ssm_i,
  729. .rsm_i = xen_rsm_i,
  730. .get_psr_i = xen_get_psr_i,
  731. .intrin_local_irq_restore
  732. = xen_intrin_local_irq_restore,
  733. };
  734. /******************************************************************************
  735. * replacement of hand written assembly codes.
  736. */
  737. extern char xen_switch_to;
  738. extern char xen_leave_syscall;
  739. extern char xen_work_processed_syscall;
  740. extern char xen_leave_kernel;
  741. const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
  742. .switch_to = (unsigned long)&xen_switch_to,
  743. .leave_syscall = (unsigned long)&xen_leave_syscall,
  744. .work_processed_syscall = (unsigned long)&xen_work_processed_syscall,
  745. .leave_kernel = (unsigned long)&xen_leave_kernel,
  746. };
  747. /***************************************************************************
  748. * pv_iosapic_ops
  749. * iosapic read/write hooks.
  750. */
  751. static void
  752. xen_pcat_compat_init(void)
  753. {
  754. /* nothing */
  755. }
  756. static struct irq_chip*
  757. xen_iosapic_get_irq_chip(unsigned long trigger)
  758. {
  759. return NULL;
  760. }
  761. static unsigned int
  762. xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
  763. {
  764. struct physdev_apic apic_op;
  765. int ret;
  766. apic_op.apic_physbase = (unsigned long)iosapic -
  767. __IA64_UNCACHED_OFFSET;
  768. apic_op.reg = reg;
  769. ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
  770. if (ret)
  771. return ret;
  772. return apic_op.value;
  773. }
  774. static void
  775. xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
  776. {
  777. struct physdev_apic apic_op;
  778. apic_op.apic_physbase = (unsigned long)iosapic -
  779. __IA64_UNCACHED_OFFSET;
  780. apic_op.reg = reg;
  781. apic_op.value = val;
  782. HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
  783. }
  784. static struct pv_iosapic_ops xen_iosapic_ops __initdata = {
  785. .pcat_compat_init = xen_pcat_compat_init,
  786. .__get_irq_chip = xen_iosapic_get_irq_chip,
  787. .__read = xen_iosapic_read,
  788. .__write = xen_iosapic_write,
  789. };
  790. /***************************************************************************
  791. * pv_ops initialization
  792. */
  793. void __init
  794. xen_setup_pv_ops(void)
  795. {
  796. xen_info_init();
  797. pv_info = xen_info;
  798. pv_init_ops = xen_init_ops;
  799. pv_fsys_data = xen_fsys_data;
  800. pv_patchdata = xen_patchdata;
  801. pv_cpu_ops = xen_cpu_ops;
  802. pv_iosapic_ops = xen_iosapic_ops;
  803. pv_irq_ops = xen_irq_ops;
  804. pv_time_ops = xen_time_ops;
  805. paravirt_cpu_asm_init(&xen_cpu_asm_switch);
  806. }
  807. #ifdef ASM_SUPPORTED
  808. /***************************************************************************
  809. * binary pacthing
  810. * pv_init_ops.patch_bundle
  811. */
  812. #define DEFINE_FUNC_GETREG(name, privop) \
  813. DEFINE_FUNC0(get_ ## name, \
  814. "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n")
  815. DEFINE_FUNC_GETREG(psr, PSR);
  816. DEFINE_FUNC_GETREG(eflag, EFLAG);
  817. DEFINE_FUNC_GETREG(ivr, IVR);
  818. DEFINE_FUNC_GETREG(tpr, TPR);
  819. #define DEFINE_FUNC_SET_KR(n) \
  820. DEFINE_VOID_FUNC0(set_kr ## n, \
  821. ";;\n" \
  822. "mov r9 = r8\n" \
  823. "mov r8 = " #n "\n" \
  824. "break " __stringify(HYPERPRIVOP_SET_KR) "\n")
  825. DEFINE_FUNC_SET_KR(0);
  826. DEFINE_FUNC_SET_KR(1);
  827. DEFINE_FUNC_SET_KR(2);
  828. DEFINE_FUNC_SET_KR(3);
  829. DEFINE_FUNC_SET_KR(4);
  830. DEFINE_FUNC_SET_KR(5);
  831. DEFINE_FUNC_SET_KR(6);
  832. DEFINE_FUNC_SET_KR(7);
  833. #define __DEFINE_FUNC_SETREG(name, privop) \
  834. DEFINE_VOID_FUNC0(name, \
  835. "break "__stringify(HYPERPRIVOP_ ## privop) "\n")
  836. #define DEFINE_FUNC_SETREG(name, privop) \
  837. __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop)
  838. DEFINE_FUNC_SETREG(eflag, EFLAG);
  839. DEFINE_FUNC_SETREG(tpr, TPR);
  840. __DEFINE_FUNC_SETREG(eoi, EOI);
  841. extern const char xen_check_events[];
  842. extern const char __xen_intrin_local_irq_restore_direct_start[];
  843. extern const char __xen_intrin_local_irq_restore_direct_end[];
  844. extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc;
/*
 * Hand-written patch template for local_irq_restore plus its slow path.
 *
 * xen_check_events: slow path taken when interrupts were masked and an
 * event may be pending.  On entry r9 points at the shared "masked" byte;
 * it clears the mask, re-checks the pending byte just below it, and if
 * something is pending issues HYPERPRIVOP_SSM_I so the hypervisor
 * delivers the interrupt.  Returns through b6.
 *
 * __xen_intrin_local_irq_restore_direct: the inlinable fast path.  The
 * _start/_end labels delimit the bytes copied by the patcher, and the
 * _reloc data8 records the address of the brl instruction (label 99)
 * whose target must be fixed up after copying (see xen_patch_bundle()).
 * NOTE(review): the bundle layout and the trailing nop bundle keep the
 * template exactly the size of the 5-bundle pv calling stub — do not
 * reschedule or resize without revisiting the patch machinery.
 */
asm (
	".align 32\n"
	".proc xen_check_events\n"
	"xen_check_events:\n"
	/* masked = 0
	 * r9 = masked_addr - 1
	 *    = pending_intr_addr
	 */
	"st1.rel [r9] = r0, -1\n"
	";;\n"
	/* r8 = pending_intr */
	"ld1.acq r11 = [r9]\n"
	";;\n"
	/* p9 = interrupt pending? */
	"cmp.ne p9, p10 = r11, r0\n"
	";;\n"
	"(p10) mf\n"
	/* issue hypercall to trigger interrupt */
	"(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
	"br.cond.sptk.many b6\n"
	".endp xen_check_events\n"
	"\n"
	".align 32\n"
	".proc __xen_intrin_local_irq_restore_direct\n"
	"__xen_intrin_local_irq_restore_direct:\n"
	"__xen_intrin_local_irq_restore_direct_start:\n"
	"1:\n"
	"{\n"
	/* p6 = enable requested (r8 != 0), p7 = disable requested */
	"cmp.ne p6, p7 = r8, r0\n"
	"mov r17 = ip\n" /* get ip to calc return address */
	"mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n"
	";;\n"
	"}\n"
	"{\n"
	/* r9 = XEN_PSR_I_ADDR */
	"ld8 r9 = [r9]\n"
	";;\n"
	/* r10 = masked previous value */
	"(p6) ld1.acq r10 = [r9]\n"
	"adds r17 = 1f - 1b, r17\n" /* calculate return address */
	";;\n"
	"}\n"
	"{\n"
	/* p8 = !masked interrupt masked previously? */
	"(p6) cmp.ne.unc p8, p0 = r10, r0\n"
	"\n"
	/* p7 = else clause */
	"(p7) mov r11 = 1\n"
	";;\n"
	"(p8) mov b6 = r17\n" /* set return address */
	"}\n"
	"{\n"
	/* masked = 1 */
	"(p7) st1.rel [r9] = r11\n"
	"\n"
	/* label 99 marks the brl whose target is relocated after patching */
	"[99:]\n"
	"(p8) brl.cond.dptk.few xen_check_events\n"
	"}\n"
	/* pv calling stub is 5 bundles. fill nop to adjust return address */
	"{\n"
	"nop 0\n"
	"nop 0\n"
	"nop 0\n"
	"}\n"
	"1:\n"
	"__xen_intrin_local_irq_restore_direct_end:\n"
	".endp __xen_intrin_local_irq_restore_direct\n"
	"\n"
	".align 8\n"
	"__xen_intrin_local_irq_restore_direct_reloc:\n"
	"data8 99b\n"
	);
/*
 * Table mapping each paravirt patch type to the Xen replacement bundle
 * (delimited by the corresponding *_direct_start/_direct_end symbols).
 * Consumed by xen_patch_bundle() via __paravirt_patch_apply_bundle().
 */
static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[]
__initdata_or_module =
{
/* generic privop: bundle bounds plus its PARAVIRT_PATCH_TYPE_* tag */
#define XEN_PATCH_BUNDLE_ELEM(name, type) \
	{ \
		(void*)xen_ ## name ## _direct_start, \
		(void*)xen_ ## name ## _direct_end, \
		PARAVIRT_PATCH_TYPE_ ## type, \
	}

	XEN_PATCH_BUNDLE_ELEM(fc, FC),
	XEN_PATCH_BUNDLE_ELEM(thash, THASH),
	XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
	XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
	XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
	XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
	XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
	XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
	XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
	XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
	XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
	/* irq_restore has no macro-friendly name; listed explicitly.
	 * It is also the only entry needing relocation afterwards. */
	{
		(void*)__xen_intrin_local_irq_restore_direct_start,
		(void*)__xen_intrin_local_irq_restore_direct_end,
		PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE,
	},

/* register getters: type is GETREG base plus the _IA64_REG_* index */
#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
	{ \
		xen_get_ ## name ## _direct_start, \
		xen_get_ ## name ## _direct_end, \
		PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
	}

	XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG),
	XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC),
	XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM),

/* register setters: SETREG base plus the _IA64_REG_* index */
#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
	{ \
		xen_ ## name ## _direct_start, \
		xen_ ## name ## _direct_end, \
		PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
	}

#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
	__XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg)

	XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7),
	XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG),
	XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR),
	/* eoi symbol has no "set_" prefix, hence the __ variant */
	__XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI),
	XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC),
	XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM),
};
  976. static unsigned long __init_or_module
  977. xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
  978. {
  979. const unsigned long nelems = sizeof(xen_patch_bundle_elems) /
  980. sizeof(xen_patch_bundle_elems[0]);
  981. unsigned long used;
  982. const struct paravirt_patch_bundle_elem *found;
  983. used = __paravirt_patch_apply_bundle(sbundle, ebundle, type,
  984. xen_patch_bundle_elems, nelems,
  985. &found);
  986. if (found == NULL)
  987. /* fallback */
  988. return ia64_native_patch_bundle(sbundle, ebundle, type);
  989. if (used == 0)
  990. return used;
  991. /* relocation */
  992. switch (type) {
  993. case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: {
  994. unsigned long reloc =
  995. __xen_intrin_local_irq_restore_direct_reloc;
  996. unsigned long reloc_offset = reloc - (unsigned long)
  997. __xen_intrin_local_irq_restore_direct_start;
  998. unsigned long tag = (unsigned long)sbundle + reloc_offset;
  999. paravirt_patch_reloc_brl(tag, xen_check_events);
  1000. break;
  1001. }
  1002. default:
  1003. /* nothing */
  1004. break;
  1005. }
  1006. return used;
  1007. }
#endif /* ASM_SUPPORTED */
/*
 * Branch-patch targets: for each patchable kernel entry point, the Xen
 * replacement routine and its PARAVIRT_PATCH_TYPE_BR_* tag.  Consumed by
 * xen_patch_branch() below.
 */
const struct paravirt_patch_branch_target xen_branch_target[]
__initconst = {
#define PARAVIRT_BR_TARGET(name, type) \
	{ \
		&xen_ ## name, \
		PARAVIRT_PATCH_TYPE_BR_ ## type, \
	}
	PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
	PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
	PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
	PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
};
  1021. static void __init
  1022. xen_patch_branch(unsigned long tag, unsigned long type)
  1023. {
  1024. const unsigned long nelem =
  1025. sizeof(xen_branch_target) / sizeof(xen_branch_target[0]);
  1026. __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem);
  1027. }