/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
#endif

/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
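
/*
 * Worked example of the BO decoding above: bdnz has BO = 0b10000,
 * so bit 0x04 is clear and CTR is decremented, bit 0x02 is clear so
 * the branch is taken while CTR != 0, and bit 0x10 is set so the CR
 * test is skipped.  blt has BO = 0b01100: CTR is left alone (0x04
 * set) and the branch is taken when CR bit BI equals (bo >> 3) & 1,
 * i.e. when the selected bit is 1.
 */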

static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}

/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if (instr & 0x04000000)		/* update forms */
			regs->gpr[ra] = ea;
	}
#ifdef __powerpc64__
	if (!(regs->msr & MSR_SF))
		ea &= 0xffffffffUL;
#endif
	return ea;
}
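
/*
 * For example, lwz r3,8(r1) (opcode 32, RA = 1, d = 8) gives
 * ea = regs->gpr[1] + 8; the low opcode bit (0x04000000 here)
 * distinguishes update forms such as lwzu (opcode 33), which also
 * write the EA back into RA.
 */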

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if ((instr & 3) == 1)		/* update forms */
			regs->gpr[ra] = ea;
	}
	if (!(regs->msr & MSR_SF))
		ea &= 0xffffffffUL;
	return ea;
}
#endif /* __powerpc64__ */

/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
					int do_update)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra) {
		ea += regs->gpr[ra];
		if (do_update)		/* update forms */
			regs->gpr[ra] = ea;
	}
#ifdef __powerpc64__
	if (!(regs->msr & MSR_SF))
		ea &= 0xffffffffUL;
#endif
	return ea;
}

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
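
/*
 * For example, max_align(0x1002) == 2, max_align(0x1004) == 4 and
 * max_align(7) == 1; OR-ing in sizeof(unsigned long) first caps the
 * result at the register width (8 on 64-bit, 4 on 32-bit).
 */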

static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif

static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}

static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
	*dest = x;
	return 0;
}
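
/*
 * For example, a 4-byte read at ea == 0x1002 is done as two aligned
 * 2-byte reads.  The shift-and-add assembles the pieces most
 * significant first, which is only correct big-endian; that is one
 * reason emulate_step() refuses loads and stores when MSR_LE is set.
 */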

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}

static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}

static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;	/* advance by the chunk just written, not one byte */
	}
	return 0;
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}

#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&val[0], ea, nb, regs);
		ptr += sizeof(unsigned long) - nb;
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&val[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}

static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		ptr += sizeof(unsigned long) - nb;
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(val[1], ea + 4, 4, regs);
	}
	return err;
}
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */
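
/*
 * lvx/stvx ignore the low four bits of the EA (the access always
 * covers the aligned 16-byte block containing it), which is why
 * address_ok() above is applied to ea & ~0xfUL rather than to ea.
 */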

#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */

#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_SF))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}

static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_SF)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}
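
/*
 * The carry-out test in add_with_carry() relies on unsigned
 * wraparound: val1 + val2 overflows iff the sum is less than either
 * addend (e.g. 0xffffffff + 1 wraps to 0, and 0 < 0xffffffff flags
 * the carry); the val == val1 case catches a carry_in that wraps
 * val2 + 1 itself to zero.
 */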

static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
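
/*
 * These masks use IBM bit numbering (bit 0 is the most significant).
 * MASK32(mb, me) sets bits mb..me: MASK32(0, 7) == 0xff000000 and
 * MASK32(24, 31) == 0xff.  When me < mb the mask wraps around, e.g.
 * the low word of MASK32(28, 3) is 0xf000000f, matching rlwinm's
 * mask semantics.
 */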

/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned long int ea;
	unsigned int cr, mb, me, sh;
	int err;
	unsigned long old_ra;
	long ival;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		if ((regs->msr & MSR_SF) == 0)
			regs->nip &= 0xffffffffUL;
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = imm;
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;
#endif
	case 18:	/* b */
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1) {
			regs->link = regs->nip + 4;
			if ((regs->msr & MSR_SF) == 0)
				regs->link &= 0xffffffffUL;
		}
		if ((regs->msr & MSR_SF) == 0)
			imm &= 0xffffffffUL;
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 16:	/* bclr */
		case 528:	/* bcctr */
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip += 4;
			if ((regs->msr & MSR_SF) == 0) {
				regs->nip &= 0xffffffffUL;
				imm &= 0xffffffffUL;
			}
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;
		case 18:	/* rfid, scary */
			return -1;
		case 150:	/* isync */
			isync();
			goto instr_done;
		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;
		case 854:	/* eieio */
			eieio();
			goto instr_done;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	switch (opcode) {
	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;
	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;
	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;
	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;
	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;
	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;
	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;
	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;
	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;
	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;
	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;
	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;
	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;
	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;
	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;
	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;
	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;
#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				break;
			regs->gpr[rd] = regs->msr & MSR_MASK;
			goto instr_done;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				break;
			imm = regs->gpr[rd];
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsr that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV and MSR_ME */
			if (regs->msr & MSR_PR)
				break;
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
			imm = (regs->msr & MSR_MASK & ~imm)
				| (regs->gpr[rd] & imm);
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsrd that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#endif
		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;
		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;
		case 339:	/* mfspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case 0x100:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case 0x120:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			}
			break;
		case 467:	/* mtspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case 0x100:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case 0x120:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			}
			break;

/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;
		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;

/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;
		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;
		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;
		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;
		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;
		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;
		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;
		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;

/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;
		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;
		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;
		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;
		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;
		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;
		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;
		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;
		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;
		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;
		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;
		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;	/* shift count comes from RB, not RD */
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;
		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;
		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbst");
			if (err)
				return 0;
			goto instr_done;
		case 86:	/* dcbf */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbf");
			if (err)
				return 0;
			goto instr_done;
		case 246:	/* dcbtst */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetchw((void *) ea);
			}
			goto instr_done;
		case 278:	/* dcbt */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetch((void *) ea);
			}
			goto instr_done;
		}
		break;
	}

	/*
	 * Following cases are for loads and stores, so bail out
	 * if we're in little-endian mode.
	 */
	if (regs->msr & MSR_LE)
		return 0;

	/*
	 * Save register RA in case it's an update form load or store
	 * and the access faults.
	 */
	old_ra = regs->gpr[ra];

	switch (opcode) {
	case 31:
		u = instr & 0x40;
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "lwarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;
		case 150:	/* stwcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;
#ifdef __powerpc64__
		case 84:	/* ldarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "ldarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;
		case 214:	/* stdcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;
		case 21:	/* ldx */
		case 53:	/* ldux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       8, regs);
			goto ldst_done;
#endif
		case 23:	/* lwzx */
		case 55:	/* lwzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			goto ldst_done;
		case 87:	/* lbzx */
		case 119:	/* lbzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       1, regs);
			goto ldst_done;
#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				break;
			ea = xform_ea(instr, regs, 0);
			err = do_vec_load(rd, do_lvx, ea, regs);
			goto ldst_done;
		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				break;
			ea = xform_ea(instr, regs, 0);
			err = do_vec_store(rd, do_stvx, ea, regs);
			goto ldst_done;
#endif /* CONFIG_ALTIVEC */
#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
			goto ldst_done;
#endif
		case 151:	/* stwx */
		case 183:	/* stwux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
			goto ldst_done;
		case 215:	/* stbx */
		case 247:	/* stbux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
			goto ldst_done;
		case 279:	/* lhzx */
		case 311:	/* lhzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			goto ldst_done;
#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
#endif
		case 343:	/* lhax */
		case 375:	/* lhaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			if (!err)
				regs->gpr[rd] = (signed short) regs->gpr[rd];
			goto ldst_done;
		case 407:	/* sthx */
		case 439:	/* sthux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
			goto ldst_done;
#ifdef __powerpc64__
		case 532:	/* ldbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
			if (!err)
				regs->gpr[rd] = byterev_8(val);
			goto ldst_done;
#endif
		case 534:	/* lwbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
			if (!err)
				regs->gpr[rd] = byterev_4(val);
			goto ldst_done;
#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfs, ea, 4, regs);
			goto ldst_done;
		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfd, ea, 8, regs);
			goto ldst_done;
		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfs, ea, 4, regs);
			goto ldst_done;
		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfd, ea, 8, regs);
			goto ldst_done;
#endif
#ifdef __powerpc64__
		case 660:	/* stdbrx */
			val = byterev_8(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
			goto ldst_done;
#endif
		case 662:	/* stwbrx */
			val = byterev_4(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
			goto ldst_done;
		case 790:	/* lhbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
			if (!err)
				regs->gpr[rd] = byterev_2(val);
			goto ldst_done;
		case 918:	/* sthbrx */
			val = byterev_2(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
			goto ldst_done;
#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_load(rd, do_lxvd2x, ea, regs);
			goto ldst_done;
		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_store(rd, do_stxvd2x, ea, regs);
			goto ldst_done;
#endif /* CONFIG_VSX */
		}
		break;
	case 32:	/* lwz */
	case 33:	/* lwzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
		goto ldst_done;
	case 34:	/* lbz */
	case 35:	/* lbzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
		goto ldst_done;
	case 36:	/* stw */
	case 37:	/* stwu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 4, regs);
		goto ldst_done;
	case 38:	/* stb */
	case 39:	/* stbu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 1, regs);
		goto ldst_done;
	case 40:	/* lhz */
	case 41:	/* lhzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		goto ldst_done;
	case 42:	/* lha */
	case 43:	/* lhau */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		if (!err)
			regs->gpr[rd] = (signed short) regs->gpr[rd];
		goto ldst_done;
	case 44:	/* sth */
	case 45:	/* sthu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 2, regs);
		goto ldst_done;
	case 46:	/* lmw */
		ra = (instr >> 16) & 0x1f;
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		ea = dform_ea(instr, regs);
		do {
			err = read_mem(&regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;
	case 47:	/* stmw */
		ea = dform_ea(instr, regs);
		do {
			err = write_mem(regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;
#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfs, ea, 4, regs);
		goto ldst_done;
	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfd, ea, 8, regs);
		goto ldst_done;
	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfs, ea, 4, regs);
		goto ldst_done;
	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfd, ea, 8, regs);
		goto ldst_done;
#endif
#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		switch (instr & 3) {
		case 0:		/* ld */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 1:		/* ldu */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 2:		/* lwa */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
		}
		break;
	case 62:	/* std[u] */
		val = regs->gpr[rd];
		switch (instr & 3) {
		case 0:		/* std */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		case 1:		/* stdu */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		}
		break;
#endif /* __powerpc64__ */
	}
	err = -EINVAL;

 ldst_done:
	if (err) {
		regs->gpr[ra] = old_ra;
		return 0;	/* invoke DSI if -EFAULT? */
	}
 instr_done:
	regs->nip += 4;
#ifdef __powerpc64__
	if ((regs->msr & MSR_SF) == 0)
		regs->nip &= 0xffffffffUL;
#endif
	return 1;

 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);
	goto instr_done;
}
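
/*
 * A minimal usage sketch (hypothetical caller, not part of this
 * file): a probe or trap handler can try emulate_step() first and
 * fall back to hardware single-stepping only when it returns 0.
 *
 *	int ret = emulate_step(regs, instr);
 *	if (ret > 0)
 *		return;			// emulated; regs->nip advanced
 *	else if (ret < 0)
 *		give_up();		// e.g. rfid: must not be stepped
 *	else
 *		single_step_inline();	// execute the real instruction
 *
 * give_up() and single_step_inline() are placeholder names for
 * illustration, not functions defined elsewhere in the kernel.
 */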