/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>

/* Instruction decoding */
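/*
 * A PowerPC instruction is 32 bits wide. The primary opcode occupies the
 * six most-significant bits (inst >> 26) and, for primary ops 19 and 31,
 * an extended opcode sits in bits 21-30 (IBM numbering, bit 0 = MSB).
 * RT/RS, RA and RB occupy bits 6-10, 11-15 and 16-20 respectively, and
 * the Rc record bit is bit 31. SPR and DCR numbers are encoded with
 * their two 5-bit halves swapped in the instruction, which is why
 * get_sprn() and get_dcrn() reassemble them from two masked shifts.
 */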
static inline unsigned int get_op(u32 inst)
{
        return inst >> 26;
}

static inline unsigned int get_xop(u32 inst)
{
        return (inst >> 1) & 0x3ff;
}

static inline unsigned int get_sprn(u32 inst)
{
        return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
}

static inline unsigned int get_dcrn(u32 inst)
{
        return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
}

static inline unsigned int get_rt(u32 inst)
{
        return (inst >> 21) & 0x1f;
}

static inline unsigned int get_rs(u32 inst)
{
        return (inst >> 21) & 0x1f;
}

static inline unsigned int get_ra(u32 inst)
{
        return (inst >> 16) & 0x1f;
}

static inline unsigned int get_rb(u32 inst)
{
        return (inst >> 11) & 0x1f;
}

static inline unsigned int get_rc(u32 inst)
{
        return inst & 0x1;
}

static inline unsigned int get_ws(u32 inst)
{
        return (inst >> 11) & 0x1f;
}

static inline unsigned int get_d(u32 inst)
{
        return inst & 0xffff;
}

static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.tcr & TCR_DIE) {
                /* The decrementer ticks at the same rate as the timebase, so
                 * that's how we convert the guest DEC value to the number of
                 * host ticks. */
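                /* tb_ticks_per_jiffy is the number of timebase ticks in one
                 * host jiffy, so the division below yields (rounding down)
                 * the number of jiffies until the guest DEC reaches zero. */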
                unsigned long nr_jiffies;

                nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
                mod_timer(&vcpu->arch.dec_timer,
                          get_jiffies_64() + nr_jiffies);
        } else {
                del_timer(&vcpu->arch.dec_timer);
        }
}
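
/* rfi: return from interrupt. Restore the PC from SRR0 and the MSR from
 * SRR1, going through kvmppc_set_msr() so any side effects of the MSR
 * change are applied. */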
static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pc = vcpu->arch.srr0;
        kvmppc_set_msr(vcpu, vcpu->arch.srr1);
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
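
/* Emulate one guest instruction, taken from vcpu->arch.last_inst. The
 * primary opcode is decoded first; primary ops 19 and 31 dispatch again
 * on the extended opcode. Loads and stores are forwarded to the MMIO
 * handlers (kvmppc_handle_load/kvmppc_handle_store), unhandled DCR
 * accesses are punted to userspace via EMULATE_DO_DCR, and anything
 * unrecognized fails with EMULATE_FAIL. 'advance' is cleared when the
 * PC must not be moved past the instruction: trap delivery and rfi set
 * the PC themselves. */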
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = vcpu->arch.last_inst;
        u32 ea;
        int ra;
        int rb;
        int rc;
        int rs;
        int rt;
        int ws;
        int sprn;
        int dcrn;
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;

        switch (get_op(inst)) {
        case 3:                                             /* trap */
                printk("trap!\n");
                kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
                advance = 0;
                break;

        case 19:
                switch (get_xop(inst)) {
                case 50:                                    /* rfi */
                        kvmppc_emul_rfi(vcpu);
                        advance = 0;
                        break;

                default:
                        emulated = EMULATE_FAIL;
                        break;
                }
                break;

        case 31:
                switch (get_xop(inst)) {

                case 23:                                    /* lwzx */
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;

                case 83:                                    /* mfmsr */
                        rt = get_rt(inst);
                        vcpu->arch.gpr[rt] = vcpu->arch.msr;
                        break;

                case 87:                                    /* lbzx */
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;

                case 131:                                   /* wrtee */
                        rs = get_rs(inst);
                        vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
                                         | (vcpu->arch.gpr[rs] & MSR_EE);
                        break;

                case 146:                                   /* mtmsr */
                        rs = get_rs(inst);
                        kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
                        break;

                case 151:                                   /* stwx */
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       4, 1);
                        break;

                case 163:                                   /* wrteei */
                        vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
                                         | (inst & MSR_EE);
                        break;

                case 215:                                   /* stbx */
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       1, 1);
                        break;

                case 247:                                   /* stbux */
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = vcpu->arch.gpr[rb];
                        if (ra)
                                ea += vcpu->arch.gpr[ra];

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       1, 1);
                        vcpu->arch.gpr[ra] = ea;        /* update form: RA <- EA */
                        break;

                case 279:                                   /* lhzx */
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;

                case 311:                                   /* lhzux */
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = vcpu->arch.gpr[rb];
                        if (ra)
                                ea += vcpu->arch.gpr[ra];

                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        vcpu->arch.gpr[ra] = ea;
                        break;

                case 323:                                   /* mfdcr */
                        dcrn = get_dcrn(inst);
                        rt = get_rt(inst);

                        /* The guest may access CPR0 registers to determine
                         * the timebase frequency, and it must know the real
                         * host frequency because it can directly access the
                         * timebase registers.
                         *
                         * It would be possible to emulate those accesses in
                         * userspace, but userspace can really only figure out
                         * the end frequency. We could decompose that into the
                         * factors that compute it, but that's tricky math,
                         * and it's easier to just report the real CPR0
                         * values. */
                        switch (dcrn) {
                        case DCRN_CPR0_CONFIG_ADDR:
                                vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
                                break;
                        case DCRN_CPR0_CONFIG_DATA:
                                local_irq_disable();
                                mtdcr(DCRN_CPR0_CONFIG_ADDR,
                                      vcpu->arch.cpr0_cfgaddr);
                                vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
                                local_irq_enable();
                                break;
                        default:
                                run->dcr.dcrn = dcrn;
                                run->dcr.data = 0;
                                run->dcr.is_write = 0;
                                vcpu->arch.io_gpr = rt;
                                vcpu->arch.dcr_needed = 1;
                                emulated = EMULATE_DO_DCR;
                        }
                        break;

                case 339:                                   /* mfspr */
                        sprn = get_sprn(inst);
                        rt = get_rt(inst);

                        switch (sprn) {
                        case SPRN_SRR0:
                                vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
                        case SPRN_SRR1:
                                vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
                        case SPRN_MMUCR:
                                vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
                        case SPRN_PID:
                                vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
                        case SPRN_IVPR:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
                        case SPRN_CCR0:
                                vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
                        case SPRN_CCR1:
                                vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
                        case SPRN_PVR:
                                vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
                        case SPRN_DEAR:
                                vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
                        case SPRN_ESR:
                                vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
                        case SPRN_DBCR0:
                                vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
                        case SPRN_DBCR1:
                                vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;

                        /* Note: mftb and TBRL/TBWL are user-accessible, so
                         * the guest can always access the real TB anyways.
                         * In fact, we probably will never see these traps. */
                        case SPRN_TBWL:
                                vcpu->arch.gpr[rt] = mftbl(); break;
                        case SPRN_TBWU:
                                vcpu->arch.gpr[rt] = mftbu(); break;

                        case SPRN_SPRG0:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
                        case SPRN_SPRG1:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
                        case SPRN_SPRG2:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
                        case SPRN_SPRG3:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
                        /* Note: SPRG4-7 are user-readable, so we don't get
                         * a trap. */

                        case SPRN_IVOR0:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
                        case SPRN_IVOR1:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
                        case SPRN_IVOR2:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
                        case SPRN_IVOR3:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
                        case SPRN_IVOR4:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
                        case SPRN_IVOR5:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
                        case SPRN_IVOR6:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
                        case SPRN_IVOR7:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
                        case SPRN_IVOR8:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
                        case SPRN_IVOR9:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
                        case SPRN_IVOR10:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
                        case SPRN_IVOR11:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
                        case SPRN_IVOR12:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
                        case SPRN_IVOR13:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
                        case SPRN_IVOR14:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
                        case SPRN_IVOR15:
                                vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;

                        default:
                                printk("mfspr: unknown spr %x\n", sprn);
                                vcpu->arch.gpr[rt] = 0;
                                break;
                        }
                        break;

                case 407:                                   /* sthx */
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       2, 1);
                        break;

                case 439:                                   /* sthux */
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = vcpu->arch.gpr[rb];
                        if (ra)
                                ea += vcpu->arch.gpr[ra];

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       2, 1);
                        vcpu->arch.gpr[ra] = ea;
                        break;

                case 451:                                   /* mtdcr */
                        dcrn = get_dcrn(inst);
                        rs = get_rs(inst);

                        /* emulate some access in kernel */
                        switch (dcrn) {
                        case DCRN_CPR0_CONFIG_ADDR:
                                vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
                                break;
                        default:
                                run->dcr.dcrn = dcrn;
                                run->dcr.data = vcpu->arch.gpr[rs];
                                run->dcr.is_write = 1;
                                vcpu->arch.dcr_needed = 1;
                                emulated = EMULATE_DO_DCR;
                        }
                        break;

                case 467:                                   /* mtspr */
                        sprn = get_sprn(inst);
                        rs = get_rs(inst);
                        switch (sprn) {
                        case SPRN_SRR0:
                                vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SRR1:
                                vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
                        case SPRN_MMUCR:
                                vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
                        case SPRN_PID:
                                kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
                        case SPRN_CCR0:
                                vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
                        case SPRN_CCR1:
                                vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
                        case SPRN_DEAR:
                                vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
                        case SPRN_ESR:
                                vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
                        case SPRN_DBCR0:
                                vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
                        case SPRN_DBCR1:
                                vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;

                        /* XXX We need to context-switch the timebase for
                         * watchdog and FIT. */
                        case SPRN_TBWL: break;
                        case SPRN_TBWU: break;

                        case SPRN_DEC:
                                vcpu->arch.dec = vcpu->arch.gpr[rs];
                                kvmppc_emulate_dec(vcpu);
                                break;

                        case SPRN_TSR:
                                vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;

                        case SPRN_TCR:
                                vcpu->arch.tcr = vcpu->arch.gpr[rs];
                                kvmppc_emulate_dec(vcpu);
                                break;

                        case SPRN_SPRG0:
                                vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG1:
                                vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG2:
                                vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG3:
                                vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;

                        /* Note: SPRG4-7 are user-readable. These values are
                         * loaded into the real SPRGs when resuming the
                         * guest. */
                        case SPRN_SPRG4:
                                vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG5:
                                vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG6:
                                vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG7:
                                vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;

                        case SPRN_IVPR:
                                vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR0:
                                vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR1:
                                vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR2:
                                vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR3:
                                vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR4:
                                vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR5:
                                vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR6:
                                vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR7:
                                vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR8:
                                vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR9:
                                vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR10:
                                vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR11:
                                vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR12:
                                vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR13:
                                vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR14:
                                vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
                        case SPRN_IVOR15:
                                vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;

                        default:
                                printk("mtspr: unknown spr %x\n", sprn);
                                emulated = EMULATE_FAIL;
                                break;
                        }
                        break;

                case 470:                                   /* dcbi */
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled
                         * dcache coherence. */
                        break;

                case 534:                                   /* lwbrx */
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;

                case 566:                                   /* tlbsync */
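                        /* No-op: guest TLB writes are emulated synchronously
                         * in the host (see tlbwe below), so there is no
                         * deferred TLB update left to synchronize. */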
                        break;

                case 662:                                   /* stwbrx */
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       4, 0);
                        break;

                case 978:                                   /* tlbwe */
                        ra = get_ra(inst);
                        rs = get_rs(inst);
                        ws = get_ws(inst);
                        emulated = kvmppc_emul_tlbwe(vcpu, ra, rs, ws);
                        break;

                case 914:                                   /* tlbsx */
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);
                        rc = get_rc(inst);
                        emulated = kvmppc_emul_tlbsx(vcpu, rt, ra, rb, rc);
                        break;

                case 790:                                   /* lhbrx */
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;

                case 918:                                   /* sthbrx */
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       2, 0);
                        break;

                case 966:                                   /* iccci */
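                        /* No-op here; the host is assumed to keep the icache
                         * coherent for guest pages, so the guest's icache
                         * invalidate can be safely ignored. */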
                        break;

                default:
                        printk("unknown: op %d xop %d\n", get_op(inst),
                               get_xop(inst));
                        emulated = EMULATE_FAIL;
                        break;
                }
                break;

        case 32:                                            /* lwz */
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;

        case 33:                                            /* lwzu */
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case 34:                                            /* lbz */
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;

        case 35:                                            /* lbzu */
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case 36:                                            /* stw */
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               4, 1);
                break;

        case 37:                                            /* stwu */
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               4, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case 38:                                            /* stb */
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               1, 1);
                break;

        case 39:                                            /* stbu */
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               1, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case 40:                                            /* lhz */
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;

        case 41:                                            /* lhzu */
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case 44:                                            /* sth */
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               2, 1);
                break;

        case 45:                                            /* sthu */
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               2, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        default:
                printk("unknown op %d\n", get_op(inst));
                emulated = EMULATE_FAIL;
                break;
        }

        KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit);

        if (advance)
                vcpu->arch.pc += 4; /* Advance past emulated instruction. */

        return emulated;
}