  1. /*
  2. * mmio.c: MMIO emulation components.
  3. * Copyright (c) 2004, Intel Corporation.
  4. * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
  5. * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
  6. *
  7. * Copyright (c) 2007 Intel Corporation KVM support.
  8. * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
  9. * Xiantao Zhang (xiantao.zhang@intel.com)
  10. *
  11. * This program is free software; you can redistribute it and/or modify it
  12. * under the terms and conditions of the GNU General Public License,
  13. * version 2, as published by the Free Software Foundation.
  14. *
  15. * This program is distributed in the hope it will be useful, but WITHOUT
  16. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  17. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  18. * more details.
  19. *
  20. * You should have received a copy of the GNU General Public License along with
  21. * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  22. * Place - Suite 330, Boston, MA 02111-1307 USA.
  23. *
  24. */
  25. #include <linux/kvm_host.h>
  26. #include "vcpu.h"
  27. static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
  28. {
  29. VLSAPIC_XTP(v) = val;
  30. }
  31. /*
  32. * LSAPIC OFFSET
  33. */
  34. #define PIB_LOW_HALF(ofst) !(ofst & (1 << 20))
  35. #define PIB_OFST_INTA 0x1E0000
  36. #define PIB_OFST_XTP 0x1E0008
  37. /*
  38. * execute write IPI op.
  39. */
  40. static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
  41. uint64_t addr, uint64_t data)
  42. {
  43. struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
  44. unsigned long psr;
  45. local_irq_save(psr);
  46. p->exit_reason = EXIT_REASON_IPI;
  47. p->u.ipi_data.addr.val = addr;
  48. p->u.ipi_data.data.val = data;
  49. vmm_transition(current_vcpu);
  50. local_irq_restore(psr);
  51. }
  52. void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
  53. unsigned long length, unsigned long val)
  54. {
  55. addr &= (PIB_SIZE - 1);
  56. switch (addr) {
  57. case PIB_OFST_INTA:
  58. /*panic_domain(NULL, "Undefined write on PIB INTA\n");*/
  59. panic_vm(v);
  60. break;
  61. case PIB_OFST_XTP:
  62. if (length == 1) {
  63. vlsapic_write_xtp(v, val);
  64. } else {
  65. /*panic_domain(NULL,
  66. "Undefined write on PIB XTP\n");*/
  67. panic_vm(v);
  68. }
  69. break;
  70. default:
  71. if (PIB_LOW_HALF(addr)) {
  72. /*lower half */
  73. if (length != 8)
  74. /*panic_domain(NULL,
  75. "Can't LHF write with size %ld!\n",
  76. length);*/
  77. panic_vm(v);
  78. else
  79. vlsapic_write_ipi(v, addr, val);
  80. } else { /* upper half
  81. printk("IPI-UHF write %lx\n",addr);*/
  82. panic_vm(v);
  83. }
  84. break;
  85. }
  86. }
  87. unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
  88. unsigned long length)
  89. {
  90. uint64_t result = 0;
  91. addr &= (PIB_SIZE - 1);
  92. switch (addr) {
  93. case PIB_OFST_INTA:
  94. if (length == 1) /* 1 byte load */
  95. ; /* There is no i8259, there is no INTA access*/
  96. else
  97. /*panic_domain(NULL,"Undefined read on PIB INTA\n"); */
  98. panic_vm(v);
  99. break;
  100. case PIB_OFST_XTP:
  101. if (length == 1) {
  102. result = VLSAPIC_XTP(v);
  103. /* printk("read xtp %lx\n", result); */
  104. } else {
  105. /*panic_domain(NULL,
  106. "Undefined read on PIB XTP\n");*/
  107. panic_vm(v);
  108. }
  109. break;
  110. default:
  111. panic_vm(v);
  112. break;
  113. }
  114. return result;
  115. }
  116. static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
  117. u16 s, int ma, int dir)
  118. {
  119. unsigned long iot;
  120. struct exit_ctl_data *p = &vcpu->arch.exit_data;
  121. unsigned long psr;
  122. iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);
  123. local_irq_save(psr);
  124. /*Intercept the acces for PIB range*/
  125. if (iot == GPFN_PIB) {
  126. if (!dir)
  127. lsapic_write(vcpu, src_pa, s, *dest);
  128. else
  129. *dest = lsapic_read(vcpu, src_pa, s);
  130. goto out;
  131. }
  132. p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
  133. p->u.ioreq.addr = src_pa;
  134. p->u.ioreq.size = s;
  135. p->u.ioreq.dir = dir;
  136. if (dir == IOREQ_WRITE)
  137. p->u.ioreq.data = *dest;
  138. p->u.ioreq.state = STATE_IOREQ_READY;
  139. vmm_transition(vcpu);
  140. if (p->u.ioreq.state == STATE_IORESP_READY) {
  141. if (dir == IOREQ_READ)
  142. *dest = p->u.ioreq.data;
  143. } else
  144. panic_vm(vcpu);
  145. out:
  146. local_irq_restore(psr);
  147. return ;
  148. }
  149. /*
  150. dir 1: read 0:write
  151. inst_type 0:integer 1:floating point
  152. */
  153. #define SL_INTEGER 0 /* store/load interger*/
  154. #define SL_FLOATING 1 /* store/load floating*/
  155. void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
  156. {
  157. struct kvm_pt_regs *regs;
  158. IA64_BUNDLE bundle;
  159. int slot, dir = 0;
  160. int inst_type = -1;
  161. u16 size = 0;
  162. u64 data, slot1a, slot1b, temp, update_reg;
  163. s32 imm;
  164. INST64 inst;
  165. regs = vcpu_regs(vcpu);
  166. if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
  167. /* if fetch code fail, return and try again */
  168. return;
  169. }
  170. slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
  171. if (!slot)
  172. inst.inst = bundle.slot0;
  173. else if (slot == 1) {
  174. slot1a = bundle.slot1a;
  175. slot1b = bundle.slot1b;
  176. inst.inst = slot1a + (slot1b << 18);
  177. } else if (slot == 2)
  178. inst.inst = bundle.slot2;
  179. /* Integer Load/Store */
  180. if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
  181. inst_type = SL_INTEGER;
  182. size = (inst.M1.x6 & 0x3);
  183. if ((inst.M1.x6 >> 2) > 0xb) {
  184. /*write*/
  185. dir = IOREQ_WRITE;
  186. data = vcpu_get_gr(vcpu, inst.M4.r2);
  187. } else if ((inst.M1.x6 >> 2) < 0xb) {
  188. /*read*/
  189. dir = IOREQ_READ;
  190. }
  191. } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
  192. /* Integer Load + Reg update */
  193. inst_type = SL_INTEGER;
  194. dir = IOREQ_READ;
  195. size = (inst.M2.x6 & 0x3);
  196. temp = vcpu_get_gr(vcpu, inst.M2.r3);
  197. update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
  198. temp += update_reg;
  199. vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
  200. } else if (inst.M3.major == 5) {
  201. /*Integer Load/Store + Imm update*/
  202. inst_type = SL_INTEGER;
  203. size = (inst.M3.x6&0x3);
  204. if ((inst.M5.x6 >> 2) > 0xb) {
  205. /*write*/
  206. dir = IOREQ_WRITE;
  207. data = vcpu_get_gr(vcpu, inst.M5.r2);
  208. temp = vcpu_get_gr(vcpu, inst.M5.r3);
  209. imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
  210. (inst.M5.imm7 << 23);
  211. temp += imm >> 23;
  212. vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
  213. } else if ((inst.M3.x6 >> 2) < 0xb) {
  214. /*read*/
  215. dir = IOREQ_READ;
  216. temp = vcpu_get_gr(vcpu, inst.M3.r3);
  217. imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
  218. (inst.M3.imm7 << 23);
  219. temp += imm >> 23;
  220. vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
  221. }
  222. } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
  223. && inst.M9.m == 0 && inst.M9.x == 0) {
  224. /* Floating-point spill*/
  225. struct ia64_fpreg v;
  226. inst_type = SL_FLOATING;
  227. dir = IOREQ_WRITE;
  228. vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
  229. /* Write high word. FIXME: this is a kludge! */
  230. v.u.bits[1] &= 0x3ffff;
  231. mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
  232. data = v.u.bits[0];
  233. size = 3;
  234. } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
  235. /* Floating-point spill + Imm update */
  236. struct ia64_fpreg v;
  237. inst_type = SL_FLOATING;
  238. dir = IOREQ_WRITE;
  239. vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
  240. temp = vcpu_get_gr(vcpu, inst.M10.r3);
  241. imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
  242. (inst.M10.imm7 << 23);
  243. temp += imm >> 23;
  244. vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
  245. /* Write high word.FIXME: this is a kludge! */
  246. v.u.bits[1] &= 0x3ffff;
  247. mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
  248. data = v.u.bits[0];
  249. size = 3;
  250. } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
  251. /* Floating-point stf8 + Imm update */
  252. struct ia64_fpreg v;
  253. inst_type = SL_FLOATING;
  254. dir = IOREQ_WRITE;
  255. size = 3;
  256. vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
  257. data = v.u.bits[0]; /* Significand. */
  258. temp = vcpu_get_gr(vcpu, inst.M10.r3);
  259. imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
  260. (inst.M10.imm7 << 23);
  261. temp += imm >> 23;
  262. vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
  263. } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
  264. && inst.M15.x6 <= 0x2f) {
  265. temp = vcpu_get_gr(vcpu, inst.M15.r3);
  266. imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
  267. (inst.M15.imm7 << 23);
  268. temp += imm >> 23;
  269. vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);
  270. vcpu_increment_iip(vcpu);
  271. return;
  272. } else if (inst.M12.major == 6 && inst.M12.m == 1
  273. && inst.M12.x == 1 && inst.M12.x6 == 1) {
  274. /* Floating-point Load Pair + Imm ldfp8 M12*/
  275. struct ia64_fpreg v;
  276. inst_type = SL_FLOATING;
  277. dir = IOREQ_READ;
  278. size = 8; /*ldfd*/
  279. mmio_access(vcpu, padr, &data, size, ma, dir);
  280. v.u.bits[0] = data;
  281. v.u.bits[1] = 0x1003E;
  282. vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
  283. padr += 8;
  284. mmio_access(vcpu, padr, &data, size, ma, dir);
  285. v.u.bits[0] = data;
  286. v.u.bits[1] = 0x1003E;
  287. vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
  288. padr += 8;
  289. vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
  290. vcpu_increment_iip(vcpu);
  291. return;
  292. } else {
  293. inst_type = -1;
  294. panic_vm(vcpu);
  295. }
  296. size = 1 << size;
  297. if (dir == IOREQ_WRITE) {
  298. mmio_access(vcpu, padr, &data, size, ma, dir);
  299. } else {
  300. mmio_access(vcpu, padr, &data, size, ma, dir);
  301. if (inst_type == SL_INTEGER)
  302. vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
  303. else
  304. panic_vm(vcpu);
  305. }
  306. vcpu_increment_iip(vcpu);
  307. }