kvm_fw.c

/*
 * PAL/SAL call delegation
 *
 * Copyright (c) 2004 Li Susie <susie.li@intel.com>
 * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
 * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <linux/kvm_host.h>
#include <linux/smp.h>

#include "vti.h"
#include "misc.h"

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/tlb.h>

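/*
 * Call flow: a guest PAL/SAL call traps into the VMM, which records the
 * call arguments in vcpu->arch.exit_data and exits to this module.  The
 * handlers below read those arguments, perform (or emulate) the firmware
 * call on the host, and write the result back into exit_data so it can be
 * returned to the guest.
 */
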
/*
 * Handy macros to make sure that the PAL return values start out
 * as something meaningful.
 */
#define INIT_PAL_STATUS_UNIMPLEMENTED(x) \
        { \
                x.status = PAL_STATUS_UNIMPLEMENTED; \
                x.v0 = 0; \
                x.v1 = 0; \
                x.v2 = 0; \
        }

#define INIT_PAL_STATUS_SUCCESS(x) \
        { \
                x.status = PAL_STATUS_SUCCESS; \
                x.v0 = 0; \
                x.v1 = 0; \
                x.v2 = 0; \
        }

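/*
 * PAL call arguments arrive from the guest in GR28..GR31: gr28 carries the
 * PAL procedure index and gr29..gr31 carry the call arguments.  The exit
 * path snapshots them into exit_data, where they are fetched below.
 */
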
static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu,
                u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31)
{
        struct exit_ctl_data *p;

        if (vcpu) {
                p = &vcpu->arch.exit_data;
                if (p->exit_reason == EXIT_REASON_PAL_CALL) {
                        *gr28 = p->u.pal_data.gr28;
                        *gr29 = p->u.pal_data.gr29;
                        *gr30 = p->u.pal_data.gr30;
                        *gr31 = p->u.pal_data.gr31;
                        return;
                }
        }
        printk(KERN_DEBUG "Failed to get vcpu pal data!!!\n");
}

static void set_pal_result(struct kvm_vcpu *vcpu,
                struct ia64_pal_retval result)
{
        struct exit_ctl_data *p;

        p = kvm_get_exit_data(vcpu);
        if (!p)
                return;
        if (p->exit_reason == EXIT_REASON_PAL_CALL) {
                p->u.pal_data.ret = result;
                return;
        }
        INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret);
}

static void set_sal_result(struct kvm_vcpu *vcpu,
                struct sal_ret_values result)
{
        struct exit_ctl_data *p;

        p = kvm_get_exit_data(vcpu);
        if (p && p->exit_reason == EXIT_REASON_SAL_CALL) {
                p->u.sal_data.ret = result;
                return;
        }
        printk(KERN_WARNING "Failed to set sal result!!\n");
}

struct cache_flush_args {
        u64 cache_type;
        u64 operation;
        u64 progress;
        long status;
};

cpumask_t cpu_cache_coherent_map;

static void remote_pal_cache_flush(void *data)
{
        struct cache_flush_args *args = data;
        long status;
        u64 progress = args->progress;

        status = ia64_pal_cache_flush(args->cache_type, args->operation,
                                        &progress, NULL);
        if (status != 0)
                args->status = status;
}

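/*
 * PAL_CACHE_FLUSH is emulated by flushing on every host CPU: the request is
 * broadcast to the other processors via smp_call_function() and then
 * performed locally with interrupts disabled, so the flush covers every
 * processor the guest's vcpus may have run on.
 */
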
static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
{
        u64 gr28, gr29, gr30, gr31;
        struct ia64_pal_retval result = {0, 0, 0, 0};
        struct cache_flush_args args = {0, 0, 0, 0};
        long psr;

        gr28 = gr29 = gr30 = gr31 = 0;
        kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);

        if (gr31 != 0)
                printk(KERN_ERR "vcpu:%p called cache_flush error!\n", vcpu);

        /* Always call Host PAL with int=1 */
        gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
        args.cache_type = gr29;
        args.operation = gr30;
        smp_call_function(remote_pal_cache_flush,
                                (void *)&args, 1);
        if (args.status != 0)
                printk(KERN_ERR "pal_cache_flush error! "
                                "status:0x%lx\n", args.status);
        /*
         * Call Host PAL cache flush
         * Clear psr.ic when calling PAL_CACHE_FLUSH
         */
        local_irq_save(psr);
        result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
                                                &result.v0);
        local_irq_restore(psr);
        if (result.status != 0)
                printk(KERN_ERR "vcpu:%p crashed due to cache_flush err:%ld "
                                "in1:%lx,in2:%lx\n",
                                vcpu, result.status, gr29, gr30);

#if 0
        if (gr29 == PAL_CACHE_TYPE_COHERENT) {
                cpus_setall(vcpu->arch.cache_coherent_map);
                cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
                cpus_setall(cpu_cache_coherent_map);
                cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
        }
#endif
        return result;
}

struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu)
{
        struct ia64_pal_retval result;

        PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0);
        return result;
}

static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
{
        struct ia64_pal_retval result;

        PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0);

        /*
         * PAL_FREQ_BASE may not be implemented on some platforms,
         * call SAL instead.
         */
        if (result.v0 == 0) {
                result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
                                                        &result.v0,
                                                        &result.v1);
                result.v2 = 0;
        }
        return result;
}

static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
{
        struct ia64_pal_retval result;

        PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
        return result;
}

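/*
 * The next two calls are not delegated to the host firmware:
 * PAL_LOGICAL_TO_PHYSICAL is reported as unimplemented, and
 * PAL_PLATFORM_ADDR is simply acknowledged with success.
 */
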
static struct ia64_pal_retval pal_logical_to_physical(struct kvm_vcpu *vcpu)
{
        struct ia64_pal_retval result;

        INIT_PAL_STATUS_UNIMPLEMENTED(result);
        return result;
}

static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu)
{
        struct ia64_pal_retval result;

        INIT_PAL_STATUS_SUCCESS(result);
        return result;
}

static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
{
        struct ia64_pal_retval result = {0, 0, 0, 0};
        u64 in0, in1, in2, in3;

        kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
        result.status = ia64_pal_proc_get_features(&result.v0, &result.v1,
                        &result.v2, in2);
        return result;
}

static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
{
        struct ia64_pal_retval result = {0, 0, 0, 0};
        u64 in0, in1, in2, in3;

        kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
        result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);
        return result;
}

static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
{
        pal_cache_config_info_t ci;
        long status;
        unsigned long in0, in1, in2, in3, r9, r10;

        kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
        status = ia64_pal_cache_config_info(in1, in2, &ci);
        r9 = ci.pcci_info_1.pcci1_data;
        r10 = ci.pcci_info_2.pcci2_data;
        return ((struct ia64_pal_retval){status, r9, r10, 0});
}

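/*
 * Limits advertised to the guest by PAL_VM_SUMMARY: the implemented
 * virtual-address bits and region-ID bits are capped, and the TR counts
 * are fixed at 8 ITRs/8 DTRs, presumably so the guest never assumes more
 * than the virtualized MMU provides.
 */
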
#define GUEST_IMPL_VA_MSB	59
#define GUEST_RID_BITS		18

static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
{
        pal_vm_info_1_u_t vminfo1;
        pal_vm_info_2_u_t vminfo2;
        struct ia64_pal_retval result;

        PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0);
        if (!result.status) {
                vminfo1.pvi1_val = result.v0;
                vminfo1.pal_vm_info_1_s.max_itr_entry = 8;
                vminfo1.pal_vm_info_1_s.max_dtr_entry = 8;
                result.v0 = vminfo1.pvi1_val;
                /* Start from the host's vm_info_2 before overriding fields. */
                vminfo2.pvi2_val = result.v1;
                vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB;
                vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS;
                result.v1 = vminfo2.pvi2_val;
        }
        return result;
}

static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
{
        struct ia64_pal_retval result = {0, 0, 0, 0};
        unsigned long in0, in1, in2, in3;

        kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
        result.status = ia64_pal_vm_info(in1, in2,
                        (pal_tc_info_u_t *)&result.v1, &result.v2);
        return result;
}

static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
{
        u64 index = 0;
        struct exit_ctl_data *p;

        p = kvm_get_exit_data(vcpu);
        if (p && (p->exit_reason == EXIT_REASON_PAL_CALL))
                index = p->u.pal_data.gr28;

        return index;
}

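/*
 * Bookkeeping done before emulating PAL_HALT_LIGHT: mark a timer event as
 * pending and clear the fired flag before the vcpu is halted, so the timer
 * path can wake it up again.
 */
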
static void prepare_for_halt(struct kvm_vcpu *vcpu)
{
        vcpu->arch.timer_pending = 1;
        vcpu->arch.timer_fired = 0;
}

static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
{
        long status;
        unsigned long in0, in1, in2, in3, r9 = 0;
        unsigned long pm_buffer[16];

        kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
        status = ia64_pal_perf_mon_info(pm_buffer,
                                (pal_perf_mon_info_u_t *) &r9);

        if (status != 0) {
                printk(KERN_DEBUG "PAL_PERF_MON_INFO fails ret=%ld\n", status);
        } else {
                if (in1)
                        memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
                else {
                        status = PAL_STATUS_EINVAL;
                        printk(KERN_WARNING "Invalid parameters "
                                        "for PAL call:0x%lx!\n", in0);
                }
        }
        return (struct ia64_pal_retval){status, r9, 0, 0};
}

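/*
 * PAL_HALT_INFO returns a single canned power-management descriptor.
 * Read against the pal_power_mgmt_info_u_t layout in <asm/pal.h>, the
 * constant below encodes an exit latency of 1000, an entry latency of
 * 1000, a power consumption index of 10, and sets the im (implemented,
 * bit 60) and co (coherent, bit 61) flags.
 */
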
static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
{
        unsigned long in0, in1, in2, in3;
        long status;
        unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
                                        | (1UL << 61) | (1UL << 60);

        kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
        if (in1) {
                memcpy((void *)in1, &res, sizeof(res));
                status = 0;
        } else {
                status = PAL_STATUS_EINVAL;
                printk(KERN_WARNING "Invalid parameters "
                                "for PAL call:0x%lx!\n", in0);
        }

        return (struct ia64_pal_retval){status, 0, 0, 0};
}

static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
{
        unsigned long r9;
        long status;

        status = ia64_pal_mem_attrib(&r9);

        return (struct ia64_pal_retval){status, r9, 0, 0};
}

static void remote_pal_prefetch_visibility(void *v)
{
        s64 trans_type = (s64)v;
        ia64_pal_prefetch_visibility(trans_type);
}

static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
{
        struct ia64_pal_retval result = {0, 0, 0, 0};
        unsigned long in0, in1, in2, in3;

        kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
        result.status = ia64_pal_prefetch_visibility(in1);
        if (result.status == 0) {
                /* Must be performed on all remote processors
                   in the coherence domain. */
                smp_call_function(remote_pal_prefetch_visibility,
                                        (void *)in1, 1);
                /* Unnecessary on remote processors for other vcpus! */
                result.status = 1;
        }
        return result;
}

static void remote_pal_mc_drain(void *v)
{
        ia64_pal_mc_drain();
}

static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
{
        struct ia64_pal_retval result = {0, 0, 0, 0};
        unsigned long in0, in1, in2, in3;

        kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);

        if (in1 == 0 && in2) {
                char brand_info[128];
                result.status = ia64_pal_get_brand_info(brand_info);
                if (result.status == PAL_STATUS_SUCCESS)
                        memcpy((void *)in2, brand_info, 128);
        } else {
                result.status = PAL_STATUS_REQUIRES_MEMORY;
                printk(KERN_WARNING "Invalid parameters for "
                                "PAL call:0x%lx!\n", in0);
        }

        return result;
}

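/*
 * Top-level PAL dispatcher.  Looks up the call index the guest placed in
 * GR28 and routes it to one of the handlers above; calls with no handler
 * are reported back as PAL_STATUS_UNIMPLEMENTED.  The return value is 1
 * to keep running the guest, except for PAL_HALT_LIGHT where
 * kvm_emulate_halt() decides whether the vcpu should block.
 */
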
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        u64 gr28;
        struct ia64_pal_retval result = {0, 0, 0, 0};
        int ret = 1;

        gr28 = kvm_get_pal_call_index(vcpu);
        switch (gr28) {
        case PAL_CACHE_FLUSH:
                result = pal_cache_flush(vcpu);
                break;
        case PAL_MEM_ATTRIB:
                result = pal_mem_attrib(vcpu);
                break;
        case PAL_CACHE_SUMMARY:
                result = pal_cache_summary(vcpu);
                break;
        case PAL_PERF_MON_INFO:
                result = pal_perf_mon_info(vcpu);
                break;
        case PAL_HALT_INFO:
                result = pal_halt_info(vcpu);
                break;
        case PAL_HALT_LIGHT:
        {
                INIT_PAL_STATUS_SUCCESS(result);
                prepare_for_halt(vcpu);
                if (kvm_highest_pending_irq(vcpu) == -1)
                        ret = kvm_emulate_halt(vcpu);
        }
                break;
        case PAL_PREFETCH_VISIBILITY:
                result = pal_prefetch_visibility(vcpu);
                break;
        case PAL_MC_DRAIN:
                result.status = ia64_pal_mc_drain();
                /* FIXME: All vcpus likely call PAL_MC_DRAIN,
                   which causes congestion. */
                smp_call_function(remote_pal_mc_drain, NULL, 1);
                break;
        case PAL_FREQ_RATIOS:
                result = pal_freq_ratios(vcpu);
                break;
        case PAL_FREQ_BASE:
                result = pal_freq_base(vcpu);
                break;
        case PAL_LOGICAL_TO_PHYSICAL:
                result = pal_logical_to_physical(vcpu);
                break;
        case PAL_VM_SUMMARY:
                result = pal_vm_summary(vcpu);
                break;
        case PAL_VM_INFO:
                result = pal_vm_info(vcpu);
                break;
        case PAL_PLATFORM_ADDR:
                result = pal_platform_addr(vcpu);
                break;
        case PAL_CACHE_INFO:
                result = pal_cache_info(vcpu);
                break;
        case PAL_PTCE_INFO:
                INIT_PAL_STATUS_SUCCESS(result);
                result.v1 = (1L << 32) | 1L;
                break;
        case PAL_REGISTER_INFO:
                result = pal_register_info(vcpu);
                break;
        case PAL_VM_PAGE_SIZE:
                result.status = ia64_pal_vm_page_size(&result.v0,
                                                        &result.v1);
                break;
        case PAL_RSE_INFO:
                result.status = ia64_pal_rse_info(&result.v0,
                                        (pal_hints_u_t *)&result.v1);
                break;
        case PAL_PROC_GET_FEATURES:
                result = pal_proc_get_features(vcpu);
                break;
        case PAL_DEBUG_INFO:
                result.status = ia64_pal_debug_info(&result.v0,
                                                        &result.v1);
                break;
        case PAL_VERSION:
                result.status = ia64_pal_version(
                                (pal_version_u_t *)&result.v0,
                                (pal_version_u_t *)&result.v1);
                break;
        case PAL_FIXED_ADDR:
                result.status = PAL_STATUS_SUCCESS;
                result.v0 = vcpu->vcpu_id;
                break;
        case PAL_BRAND_INFO:
                result = pal_get_brand_info(vcpu);
                break;
        case PAL_GET_PSTATE:
        case PAL_CACHE_SHARED_INFO:
                INIT_PAL_STATUS_UNIMPLEMENTED(result);
                break;
        default:
                INIT_PAL_STATUS_UNIMPLEMENTED(result);
                printk(KERN_WARNING "kvm: Unsupported pal call,"
                                " index:0x%lx\n", gr28);
        }
        set_pal_result(vcpu, result);
        return ret;
}

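/*
 * SAL emulation.  Only the small subset of SAL procedures the guest
 * actually needs is handled here; frequency queries and cache flushes are
 * forwarded to the host SAL, SAL_SET_VECTORS records the boot rendezvous
 * entry point for secondary vcpus, and most of the remaining calls are
 * accepted as no-ops or merely logged.
 */
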
static struct sal_ret_values sal_emulator(struct kvm *kvm,
                                long index, unsigned long in1,
                                unsigned long in2, unsigned long in3,
                                unsigned long in4, unsigned long in5,
                                unsigned long in6, unsigned long in7)
{
        unsigned long r9 = 0;
        unsigned long r10 = 0;
        long r11 = 0;
        long status;

        status = 0;
        switch (index) {
        case SAL_FREQ_BASE:
                status = ia64_sal_freq_base(in1, &r9, &r10);
                break;
        case SAL_PCI_CONFIG_READ:
                printk(KERN_WARNING "kvm: Not allowed to call here!"
                        " SAL_PCI_CONFIG_READ\n");
                break;
        case SAL_PCI_CONFIG_WRITE:
                printk(KERN_WARNING "kvm: Not allowed to call here!"
                        " SAL_PCI_CONFIG_WRITE\n");
                break;
        case SAL_SET_VECTORS:
                if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
                        if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
                                status = -2;
                        } else {
                                kvm->arch.rdv_sal_data.boot_ip = in2;
                                kvm->arch.rdv_sal_data.boot_gp = in3;
                        }
                        printk("Rendezvous called! iip:%lx\n\n", in2);
                } else
                        printk(KERN_WARNING "kvm: CALLED SAL_SET_VECTORS %lu."
                                        " ignored...\n", in1);
                break;
        case SAL_GET_STATE_INFO:
                /* No more info. */
                status = -5;
                r9 = 0;
                break;
        case SAL_GET_STATE_INFO_SIZE:
                /* Return a dummy size. */
                status = 0;
                r9 = 128;
                break;
        case SAL_CLEAR_STATE_INFO:
                /* Noop. */
                break;
        case SAL_MC_RENDEZ:
                printk(KERN_WARNING
                        "kvm: called SAL_MC_RENDEZ. ignored...\n");
                break;
        case SAL_MC_SET_PARAMS:
                printk(KERN_WARNING
                        "kvm: called SAL_MC_SET_PARAMS. ignored!\n");
                break;
        case SAL_CACHE_FLUSH:
                if (1) {
                        /* Flush using SAL.
                           This method is faster but has a side
                           effect on other vcpus running on
                           this cpu. */
                        status = ia64_sal_cache_flush(in1);
                } else {
                        /* Maybe need to implement the method
                           without side effects! */
                        status = 0;
                }
                break;
        case SAL_CACHE_INIT:
                printk(KERN_WARNING
                        "kvm: called SAL_CACHE_INIT. ignored...\n");
                break;
        case SAL_UPDATE_PAL:
                printk(KERN_WARNING
                        "kvm: CALLED SAL_UPDATE_PAL. ignored...\n");
                break;
        default:
                printk(KERN_WARNING "kvm: called SAL_CALL with unknown index."
                        " index:%ld\n", index);
                status = -1;
                break;
        }
        return ((struct sal_ret_values) {status, r9, r10, r11});
}

static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
                u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7)
{
        struct exit_ctl_data *p;

        p = kvm_get_exit_data(vcpu);
        if (p) {
                if (p->exit_reason == EXIT_REASON_SAL_CALL) {
                        *in0 = p->u.sal_data.in0;
                        *in1 = p->u.sal_data.in1;
                        *in2 = p->u.sal_data.in2;
                        *in3 = p->u.sal_data.in3;
                        *in4 = p->u.sal_data.in4;
                        *in5 = p->u.sal_data.in5;
                        *in6 = p->u.sal_data.in6;
                        *in7 = p->u.sal_data.in7;
                        return;
                }
        }
        *in0 = 0;
}

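/*
 * Entry point for a guest SAL call: fetch the eight arguments recorded at
 * exit time, run them through sal_emulator(), and store the resulting
 * sal_ret_values back for the guest.
 */
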
void kvm_sal_emul(struct kvm_vcpu *vcpu)
{
        struct sal_ret_values result;
        u64 index, in1, in2, in3, in4, in5, in6, in7;

        kvm_get_sal_call_data(vcpu, &index, &in1, &in2,
                        &in3, &in4, &in5, &in6, &in7);
        result = sal_emulator(vcpu->kvm, index, in1, in2, in3,
                        in4, in5, in6, in7);
        set_sal_result(vcpu, result);
}