/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 *
 * Copyright (c) 2007 Intel Corporation  KVM support.
 *  Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *  Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <linux/kvm_host.h>

#include "vcpu.h"
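
/*
 * Latch the guest's XTP (external task priority) byte; a 1-byte read
 * from PIB_OFST_XTP in lsapic_read() below returns the same value.
 */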
static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
{
	VLSAPIC_XTP(v) = val;
}

/*
 * LSAPIC OFFSET
 */
#define PIB_LOW_HALF(ofst)	!(ofst & (1 << 20))
#define PIB_OFST_INTA		0x1E0000
#define PIB_OFST_XTP		0x1E0008
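
/*
 * Bit 20 of the PIB offset selects the half: offsets with bit 20
 * clear form the IPI area, while the INTA and XTP bytes defined
 * above sit in the upper half.
 */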

/*
 * execute write IPI op.
 */
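/*
 * The write cannot be completed on the guest side: the target address
 * and data are stashed in the per-vcpu exit data and vmm_transition()
 * switches to the host, which is expected to carry out the actual
 * delivery.
 */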
static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
		uint64_t addr, uint64_t data)
{
	struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
	unsigned long psr;

	local_irq_save(psr);

	p->exit_reason = EXIT_REASON_IPI;
	p->u.ipi_data.addr.val = addr;
	p->u.ipi_data.data.val = data;

	vmm_transition(current_vcpu);

	local_irq_restore(psr);
}
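
/*
 * PIB writes dispatch on the offset: INTA is undefined for writes,
 * XTP takes exactly one byte, and any low-half address is an 8-byte
 * IPI command forwarded to vlsapic_write_ipi().
 */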
void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
			unsigned long length, unsigned long val)
{
	addr &= (PIB_SIZE - 1);

	switch (addr) {
	case PIB_OFST_INTA:
		/*panic_domain(NULL, "Undefined write on PIB INTA\n");*/
		panic_vm(v);
		break;
	case PIB_OFST_XTP:
		if (length == 1) {
			vlsapic_write_xtp(v, val);
		} else {
			/*panic_domain(NULL,
			"Undefined write on PIB XTP\n");*/
			panic_vm(v);
		}
		break;
	default:
		if (PIB_LOW_HALF(addr)) {
			/* lower half */
			if (length != 8)
				/*panic_domain(NULL,
				"Can't LHF write with size %ld!\n",
				length);*/
				panic_vm(v);
			else
				vlsapic_write_ipi(v, addr, val);
		} else {
			/* upper half
			printk("IPI-UHF write %lx\n", addr);*/
			panic_vm(v);
		}
		break;
	}
}

unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
		unsigned long length)
{
	uint64_t result = 0;

	addr &= (PIB_SIZE - 1);

	switch (addr) {
	case PIB_OFST_INTA:
		if (length == 1)	/* 1 byte load */
			;	/* There is no i8259, there is no INTA access */
		else
			/*panic_domain(NULL, "Undefined read on PIB INTA\n");*/
			panic_vm(v);
		break;
	case PIB_OFST_XTP:
		if (length == 1) {
			result = VLSAPIC_XTP(v);
			/*printk("read xtp %lx\n", result);*/
		} else {
			/*panic_domain(NULL,
			"Undefined read on PIB XTP\n");*/
			panic_vm(v);
		}
		break;
	default:
		panic_vm(v);
		break;
	}

	return result;
}
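
/*
 * Perform one guest MMIO access of s bytes at src_pa. For writes,
 * *dest supplies the data; for reads, the (zero-extended) result is
 * stored back through *dest. PIB accesses are handled locally above;
 * everything else is forwarded to the host as an I/O request.
 */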
static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
					u16 s, int ma, int dir)
{
	unsigned long iot;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);

	local_irq_save(psr);

	/* Intercept the access for the PIB range */
	if (iot == GPFN_PIB) {
		if (!dir)
			lsapic_write(vcpu, src_pa, s, *dest);
		else
			*dest = lsapic_read(vcpu, src_pa, s);
		goto out;
	}

	p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
	p->u.ioreq.addr = src_pa;
	p->u.ioreq.size = s;
	p->u.ioreq.dir = dir;
	if (dir == IOREQ_WRITE)
		p->u.ioreq.data = *dest;
	p->u.ioreq.state = STATE_IOREQ_READY;
	vmm_transition(vcpu);

	if (p->u.ioreq.state == STATE_IORESP_READY) {
		if (dir == IOREQ_READ)
			/* mask to s bytes so the result is zero extended */
			*dest = p->u.ioreq.data & (~0UL >> (64 - (s * 8)));
	} else
		panic_vm(vcpu);
out:
	local_irq_restore(psr);
}

/*
 * dir: 1 = read, 0 = write
 * inst_type: 0 = integer, 1 = floating point
 */
#define SL_INTEGER	0	/* store/load integer */
#define SL_FLOATING	1	/* store/load floating point */
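
/*
 * Overall flow of the emulator below: fetch the bundle at cr.iip,
 * pick the slot indicated by ipsr.ri, decode the M-unit load/store
 * form, run the access through mmio_access(), then advance iip past
 * the faulting instruction.
 */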
void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
{
	struct kvm_pt_regs *regs;
	IA64_BUNDLE bundle;
	int slot, dir = 0;
	int inst_type = -1;
	u16 size = 0;
	u64 data, slot1a, slot1b, temp, update_reg;
	s32 imm;
	INST64 inst;

	regs = vcpu_regs(vcpu);
	if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
		/* if the code fetch fails, return and try again */
		return;
	}
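
	/*
	 * An IA-64 bundle packs three 41-bit instruction slots after a
	 * 5-bit template. Slot 1 straddles the two 64-bit halves of the
	 * bundle, so it is reassembled from the 18 bits in the low word
	 * (slot1a) and the 23 bits in the high word (slot1b).
	 */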
	slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
	if (!slot)
		inst.inst = bundle.slot0;
	else if (slot == 1) {
		slot1a = bundle.slot1a;
		slot1b = bundle.slot1b;
		inst.inst = slot1a + (slot1b << 18);
	} else if (slot == 2)
		inst.inst = bundle.slot2;

	/* Integer Load/Store */
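	/*
	 * In these M-unit encodings the low two bits of x6 hold log2 of
	 * the access size (masked out here, expanded to bytes at the end
	 * of the function), and x6 >> 2 distinguishes stores (> 0xb)
	 * from loads (< 0xb).
	 */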
	if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
		inst_type = SL_INTEGER;
		size = (inst.M1.x6 & 0x3);
		if ((inst.M1.x6 >> 2) > 0xb) {
			/* write */
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M4.r2);
		} else if ((inst.M1.x6 >> 2) < 0xb) {
			/* read */
			dir = IOREQ_READ;
		}
	} else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
		/* Integer Load + Reg update */
		inst_type = SL_INTEGER;
		dir = IOREQ_READ;
		size = (inst.M2.x6 & 0x3);
		temp = vcpu_get_gr(vcpu, inst.M2.r3);
		update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
		temp += update_reg;
		vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
	} else if (inst.M3.major == 5) {
		/* Integer Load/Store + Imm update */
		inst_type = SL_INTEGER;
		size = (inst.M3.x6 & 0x3);
		if ((inst.M5.x6 >> 2) > 0xb) {
			/* write */
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M5.r2);
			temp = vcpu_get_gr(vcpu, inst.M5.r3);
			/*
			 * Reassemble the 9-bit immediate (s:i:imm7) at bit
			 * 23 of an s32, then arithmetic-shift it back down
			 * so it arrives sign extended.
			 */
			imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
				(inst.M5.imm7 << 23);
			temp += imm >> 23;
			vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
		} else if ((inst.M3.x6 >> 2) < 0xb) {
			/* read */
			dir = IOREQ_READ;
			temp = vcpu_get_gr(vcpu, inst.M3.r3);
			imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
				(inst.M3.imm7 << 23);
			temp += imm >> 23;
			vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
		}
	} else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
				&& inst.M9.m == 0 && inst.M9.x == 0) {
		/* Floating-point spill */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
		/* Write high word. FIXME: this is a kludge! */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
		/* Floating-point spill + Imm update */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);

		/* Write high word. FIXME: this is a kludge! */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
		/* Floating-point stf8 + Imm update */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		size = 3;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		data = v.u.bits[0]; /* Significand. */
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
	} else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
			&& inst.M15.x6 <= 0x2f) {
		/*
		 * Register post-update only (the lfetch forms); nothing
		 * needs to be read or written.
		 */
		temp = vcpu_get_gr(vcpu, inst.M15.r3);
		imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
			(inst.M15.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);

		vcpu_increment_iip(vcpu);
		return;
	} else if (inst.M12.major == 6 && inst.M12.m == 1
			&& inst.M12.x == 1 && inst.M12.x6 == 1) {
		/* Floating-point Load Pair + Imm update (ldfp8, M12) */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_READ;
		size = 8;	/* in bytes; this path returns before 1 << size */
		mmio_access(vcpu, padr, &data, size, ma, dir);
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;
		vcpu_set_fpreg(vcpu, inst.M12.f1, &v);

		padr += 8;
		mmio_access(vcpu, padr, &data, size, ma, dir);
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;
		vcpu_set_fpreg(vcpu, inst.M12.f2, &v);

		padr += 8;
		vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
		vcpu_increment_iip(vcpu);
		return;
	} else {
		inst_type = -1;
		panic_vm(vcpu);
	}
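
	/* size was recorded as log2(bytes) above; convert to a byte count */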
	size = 1 << size;
	if (dir == IOREQ_WRITE) {
		mmio_access(vcpu, padr, &data, size, ma, dir);
	} else {
		mmio_access(vcpu, padr, &data, size, ma, dir);
		if (inst_type == SL_INTEGER)
			vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
		else
			panic_vm(vcpu);
	}
	vcpu_increment_iip(vcpu);
}