sstep.c

  1. /*
  2. * Single-step support.
  3. *
  4. * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/kprobes.h>
  13. #include <linux/ptrace.h>
  14. #include <linux/prefetch.h>
  15. #include <asm/sstep.h>
  16. #include <asm/processor.h>
  17. #include <asm/uaccess.h>
  18. #include <asm/cputable.h>
  19. extern char system_call_common[];
  20. #ifdef CONFIG_PPC64
  21. /* Bits in SRR1 that are copied from MSR */
  22. #define MSR_MASK 0xffffffff87c0ffffUL
  23. #else
  24. #define MSR_MASK 0x87c0ffff
  25. #endif
  26. /* Bits in XER */
  27. #define XER_SO 0x80000000U
  28. #define XER_OV 0x40000000U
  29. #define XER_CA 0x20000000U
  30. #ifdef CONFIG_PPC_FPU
  31. /*
  32. * Functions in ldstfp.S
  33. */
  34. extern int do_lfs(int rn, unsigned long ea);
  35. extern int do_lfd(int rn, unsigned long ea);
  36. extern int do_stfs(int rn, unsigned long ea);
  37. extern int do_stfd(int rn, unsigned long ea);
  38. extern int do_lvx(int rn, unsigned long ea);
  39. extern int do_stvx(int rn, unsigned long ea);
  40. extern int do_lxvd2x(int rn, unsigned long ea);
  41. extern int do_stxvd2x(int rn, unsigned long ea);
  42. #endif
  43. /*
  44. * Emulate the truncation of 64-bit values in 32-bit mode.
  45. */
  46. static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
  47. {
  48. #ifdef __powerpc64__
  49. if ((msr & MSR_64BIT) == 0)
  50. val &= 0xffffffffUL;
  51. #endif
  52. return val;
  53. }
  54. /*
  55. * Determine whether a conditional branch instruction would branch.
  56. */
  57. static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
  58. {
  59. unsigned int bo = (instr >> 21) & 0x1f;
  60. unsigned int bi;
  61. if ((bo & 4) == 0) {
  62. /* decrement counter */
  63. --regs->ctr;
  64. if (((bo >> 1) & 1) ^ (regs->ctr == 0))
  65. return 0;
  66. }
  67. if ((bo & 0x10) == 0) {
  68. /* check bit from CR */
  69. bi = (instr >> 16) & 0x1f;
  70. if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
  71. return 0;
  72. }
  73. return 1;
  74. }
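/*
 * Worked example: for bdnz (BO = 0b10000), (bo & 4) == 0 so CTR is
 * decremented; bit 1 of BO is clear, so the branch is not taken if the
 * decremented CTR is zero; (bo & 0x10) is set, so the CR bit selected by
 * BI is ignored.  When the CR bit is tested, it is compared against bit 3
 * of BO, i.e. "branch if true" versus "branch if false".
 */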
  75. static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
  76. {
  77. if (!user_mode(regs))
  78. return 1;
  79. return __access_ok(ea, nb, USER_DS);
  80. }
  81. /*
  82. * Calculate effective address for a D-form instruction
  83. */
  84. static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
  85. {
  86. int ra;
  87. unsigned long ea;
  88. ra = (instr >> 16) & 0x1f;
  89. ea = (signed short) instr; /* sign-extend */
  90. if (ra) {
  91. ea += regs->gpr[ra];
  92. if (instr & 0x04000000) { /* update forms */
  93. if ((instr>>26) != 47) /* stmw is not an update form */
  94. regs->gpr[ra] = ea;
  95. }
  96. }
  97. return truncate_if_32bit(regs->msr, ea);
  98. }
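/*
 * Example: for "lwzu r5,8(r1)" the EA is r1 + 8 and r1 is updated to that
 * EA, because the update-form D-form opcodes are the odd-numbered ones
 * (bit 0x04000000 of the instruction).  stmw (opcode 47) is the one odd
 * D-form opcode that is not an update form, hence the explicit check above.
 */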
  99. #ifdef __powerpc64__
  100. /*
  101. * Calculate effective address for a DS-form instruction
  102. */
  103. static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
  104. {
  105. int ra;
  106. unsigned long ea;
  107. ra = (instr >> 16) & 0x1f;
  108. ea = (signed short) (instr & ~3); /* sign-extend */
  109. if (ra) {
  110. ea += regs->gpr[ra];
  111. if ((instr & 3) == 1) /* update forms */
  112. regs->gpr[ra] = ea;
  113. }
  114. return truncate_if_32bit(regs->msr, ea);
  115. }
  116. #endif /* __powerpc64__ */
  117. /*
  118. * Calculate effective address for an X-form instruction
  119. */
  120. static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
  121. int do_update)
  122. {
  123. int ra, rb;
  124. unsigned long ea;
  125. ra = (instr >> 16) & 0x1f;
  126. rb = (instr >> 11) & 0x1f;
  127. ea = regs->gpr[rb];
  128. if (ra) {
  129. ea += regs->gpr[ra];
  130. if (do_update) /* update forms */
  131. regs->gpr[ra] = ea;
  132. }
  133. return truncate_if_32bit(regs->msr, ea);
  134. }
  135. /*
  136. * Return the largest power of 2, not greater than sizeof(unsigned long),
  137. * such that x is a multiple of it.
  138. */
  139. static inline unsigned long max_align(unsigned long x)
  140. {
  141. x |= sizeof(unsigned long);
  142. return x & -x; /* isolates rightmost bit */
  143. }
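/*
 * e.g. max_align(0x1003) == 1, max_align(0x1004) == 4, max_align(0x1008) == 8
 * (the last is capped at 4 on 32-bit, since sizeof(unsigned long) is OR-ed
 * in before the lowest set bit is isolated).
 */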
  144. static inline unsigned long byterev_2(unsigned long x)
  145. {
  146. return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
  147. }
  148. static inline unsigned long byterev_4(unsigned long x)
  149. {
  150. return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
  151. ((x & 0xff00) << 8) | ((x & 0xff) << 24);
  152. }
  153. #ifdef __powerpc64__
  154. static inline unsigned long byterev_8(unsigned long x)
  155. {
  156. return (byterev_4(x) << 32) | byterev_4(x >> 32);
  157. }
  158. #endif
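/*
 * e.g. byterev_4(0x11223344UL) == 0x44332211UL.  These helpers emulate the
 * byte-reversed loads/stores (lhbrx, lwbrx, ldbrx, ...) and also fix up
 * piecewise accesses that are assembled in big-endian order when running
 * a little-endian kernel.
 */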
  159. static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
  160. int nb)
  161. {
  162. int err = 0;
  163. unsigned long x = 0;
  164. switch (nb) {
  165. case 1:
  166. err = __get_user(x, (unsigned char __user *) ea);
  167. break;
  168. case 2:
  169. err = __get_user(x, (unsigned short __user *) ea);
  170. break;
  171. case 4:
  172. err = __get_user(x, (unsigned int __user *) ea);
  173. break;
  174. #ifdef __powerpc64__
  175. case 8:
  176. err = __get_user(x, (unsigned long __user *) ea);
  177. break;
  178. #endif
  179. }
  180. if (!err)
  181. *dest = x;
  182. return err;
  183. }
  184. static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
  185. int nb, struct pt_regs *regs)
  186. {
  187. int err;
  188. unsigned long x, b, c;
  189. #ifdef __LITTLE_ENDIAN__
  190. int len = nb; /* save a copy of the length for byte reversal */
  191. #endif
  192. /* unaligned, do this in pieces */
  193. x = 0;
  194. for (; nb > 0; nb -= c) {
  195. #ifdef __LITTLE_ENDIAN__
  196. c = 1;
  197. #endif
  198. #ifdef __BIG_ENDIAN__
  199. c = max_align(ea);
  200. #endif
  201. if (c > nb)
  202. c = max_align(nb);
  203. err = read_mem_aligned(&b, ea, c);
  204. if (err)
  205. return err;
  206. x = (x << (8 * c)) + b;
  207. ea += c;
  208. }
  209. #ifdef __LITTLE_ENDIAN__
  210. switch (len) {
  211. case 2:
  212. *dest = byterev_2(x);
  213. break;
  214. case 4:
  215. *dest = byterev_4(x);
  216. break;
  217. #ifdef __powerpc64__
  218. case 8:
  219. *dest = byterev_8(x);
  220. break;
  221. #endif
  222. }
  223. #endif
  224. #ifdef __BIG_ENDIAN__
  225. *dest = x;
  226. #endif
  227. return 0;
  228. }
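/*
 * Example: on a big-endian kernel an unaligned 8-byte read at ea = 0x1004
 * becomes two aligned 4-byte reads (max_align(0x1004) == 4), assembled
 * most-significant chunk first.  On little-endian the access is done one
 * byte at a time in memory order and byte-reversed at the end, so *dest
 * always ends up holding the value in native order.
 */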
  229. /*
  230. * Read memory at address ea for nb bytes, return 0 for success
  231. * or -EFAULT if an error occurred.
  232. */
  233. static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
  234. struct pt_regs *regs)
  235. {
  236. if (!address_ok(regs, ea, nb))
  237. return -EFAULT;
  238. if ((ea & (nb - 1)) == 0)
  239. return read_mem_aligned(dest, ea, nb);
  240. return read_mem_unaligned(dest, ea, nb, regs);
  241. }
  242. static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
  243. int nb)
  244. {
  245. int err = 0;
  246. switch (nb) {
  247. case 1:
  248. err = __put_user(val, (unsigned char __user *) ea);
  249. break;
  250. case 2:
  251. err = __put_user(val, (unsigned short __user *) ea);
  252. break;
  253. case 4:
  254. err = __put_user(val, (unsigned int __user *) ea);
  255. break;
  256. #ifdef __powerpc64__
  257. case 8:
  258. err = __put_user(val, (unsigned long __user *) ea);
  259. break;
  260. #endif
  261. }
  262. return err;
  263. }
  264. static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
  265. int nb, struct pt_regs *regs)
  266. {
  267. int err;
  268. unsigned long c;
  269. #ifdef __LITTLE_ENDIAN__
  270. switch (nb) {
  271. case 2:
  272. val = byterev_2(val);
  273. break;
  274. case 4:
  275. val = byterev_4(val);
  276. break;
  277. #ifdef __powerpc64__
  278. case 8:
  279. val = byterev_8(val);
  280. break;
  281. #endif
  282. }
  283. #endif
  284. /* unaligned or little-endian, do this in pieces */
  285. for (; nb > 0; nb -= c) {
  286. #ifdef __LITTLE_ENDIAN__
  287. c = 1;
  288. #endif
  289. #ifdef __BIG_ENDIAN__
  290. c = max_align(ea);
  291. #endif
  292. if (c > nb)
  293. c = max_align(nb);
  294. err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
  295. if (err)
  296. return err;
  297. ea += c;
  298. }
  299. return 0;
  300. }
  301. /*
  302. * Write memory at address ea for nb bytes, return 0 for success
  303. * or -EFAULT if an error occurred.
  304. */
  305. static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
  306. struct pt_regs *regs)
  307. {
  308. if (!address_ok(regs, ea, nb))
  309. return -EFAULT;
  310. if ((ea & (nb - 1)) == 0)
  311. return write_mem_aligned(val, ea, nb);
  312. return write_mem_unaligned(val, ea, nb, regs);
  313. }
  314. #ifdef CONFIG_PPC_FPU
  315. /*
  316. * Check the address and alignment, and call func to do the actual
  317. * load or store.
  318. */
  319. static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
  320. unsigned long ea, int nb,
  321. struct pt_regs *regs)
  322. {
  323. int err;
  324. unsigned long val[sizeof(double) / sizeof(long)];
  325. unsigned long ptr;
  326. if (!address_ok(regs, ea, nb))
  327. return -EFAULT;
  328. if ((ea & 3) == 0)
  329. return (*func)(rn, ea);
  330. ptr = (unsigned long) &val[0];
  331. if (sizeof(unsigned long) == 8 || nb == 4) {
  332. err = read_mem_unaligned(&val[0], ea, nb, regs);
  333. ptr += sizeof(unsigned long) - nb;
  334. } else {
  335. /* reading a double on 32-bit */
  336. err = read_mem_unaligned(&val[0], ea, 4, regs);
  337. if (!err)
  338. err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
  339. }
  340. if (err)
  341. return err;
  342. return (*func)(rn, ptr);
  343. }
  344. static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
  345. unsigned long ea, int nb,
  346. struct pt_regs *regs)
  347. {
  348. int err;
  349. unsigned long val[sizeof(double) / sizeof(long)];
  350. unsigned long ptr;
  351. if (!address_ok(regs, ea, nb))
  352. return -EFAULT;
  353. if ((ea & 3) == 0)
  354. return (*func)(rn, ea);
  355. ptr = (unsigned long) &val[0];
  356. if (sizeof(unsigned long) == 8 || nb == 4) {
  357. ptr += sizeof(unsigned long) - nb;
  358. err = (*func)(rn, ptr);
  359. if (err)
  360. return err;
  361. err = write_mem_unaligned(val[0], ea, nb, regs);
  362. } else {
  363. /* writing a double on 32-bit */
  364. err = (*func)(rn, ptr);
  365. if (err)
  366. return err;
  367. err = write_mem_unaligned(val[0], ea, 4, regs);
  368. if (!err)
  369. err = write_mem_unaligned(val[1], ea + 4, 4, regs);
  370. }
  371. return err;
  372. }
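/*
 * For the unaligned case the FP value is staged in the on-stack val[]
 * buffer: loads fetch the bytes with read_mem_unaligned() and then point
 * the asm helper (do_lfs/do_lfd) at the aligned buffer, while stores run
 * the helper first and copy the buffer out with write_mem_unaligned().
 * The "ptr += sizeof(unsigned long) - nb" adjustment points ptr at the
 * significant bytes of a 4-byte value held right-justified in an unsigned
 * long, which sit at the higher addresses on a big-endian kernel.
 */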
  373. #endif
  374. #ifdef CONFIG_ALTIVEC
  375. /* For Altivec/VMX, no need to worry about alignment */
  376. static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
  377. unsigned long ea, struct pt_regs *regs)
  378. {
  379. if (!address_ok(regs, ea & ~0xfUL, 16))
  380. return -EFAULT;
  381. return (*func)(rn, ea);
  382. }
  383. static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
  384. unsigned long ea, struct pt_regs *regs)
  385. {
  386. if (!address_ok(regs, ea & ~0xfUL, 16))
  387. return -EFAULT;
  388. return (*func)(rn, ea);
  389. }
  390. #endif /* CONFIG_ALTIVEC */
  391. #ifdef CONFIG_VSX
  392. static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
  393. unsigned long ea, struct pt_regs *regs)
  394. {
  395. int err;
  396. unsigned long val[2];
  397. if (!address_ok(regs, ea, 16))
  398. return -EFAULT;
  399. if ((ea & 3) == 0)
  400. return (*func)(rn, ea);
  401. err = read_mem_unaligned(&val[0], ea, 8, regs);
  402. if (!err)
  403. err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
  404. if (!err)
  405. err = (*func)(rn, (unsigned long) &val[0]);
  406. return err;
  407. }
  408. static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
  409. unsigned long ea, struct pt_regs *regs)
  410. {
  411. int err;
  412. unsigned long val[2];
  413. if (!address_ok(regs, ea, 16))
  414. return -EFAULT;
  415. if ((ea & 3) == 0)
  416. return (*func)(rn, ea);
  417. err = (*func)(rn, (unsigned long) &val[0]);
  418. if (err)
  419. return err;
  420. err = write_mem_unaligned(val[0], ea, 8, regs);
  421. if (!err)
  422. err = write_mem_unaligned(val[1], ea + 8, 8, regs);
  423. return err;
  424. }
  425. #endif /* CONFIG_VSX */
  426. #define __put_user_asmx(x, addr, err, op, cr) \
  427. __asm__ __volatile__( \
  428. "1: " op " %2,0,%3\n" \
  429. " mfcr %1\n" \
  430. "2:\n" \
  431. ".section .fixup,\"ax\"\n" \
  432. "3: li %0,%4\n" \
  433. " b 2b\n" \
  434. ".previous\n" \
  435. ".section __ex_table,\"a\"\n" \
  436. PPC_LONG_ALIGN "\n" \
  437. PPC_LONG "1b,3b\n" \
  438. ".previous" \
  439. : "=r" (err), "=r" (cr) \
  440. : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
  441. #define __get_user_asmx(x, addr, err, op) \
  442. __asm__ __volatile__( \
  443. "1: "op" %1,0,%2\n" \
  444. "2:\n" \
  445. ".section .fixup,\"ax\"\n" \
  446. "3: li %0,%3\n" \
  447. " b 2b\n" \
  448. ".previous\n" \
  449. ".section __ex_table,\"a\"\n" \
  450. PPC_LONG_ALIGN "\n" \
  451. PPC_LONG "1b,3b\n" \
  452. ".previous" \
  453. : "=r" (err), "=r" (x) \
  454. : "r" (addr), "i" (-EFAULT), "0" (err))
  455. #define __cacheop_user_asmx(addr, err, op) \
  456. __asm__ __volatile__( \
  457. "1: "op" 0,%1\n" \
  458. "2:\n" \
  459. ".section .fixup,\"ax\"\n" \
  460. "3: li %0,%3\n" \
  461. " b 2b\n" \
  462. ".previous\n" \
  463. ".section __ex_table,\"a\"\n" \
  464. PPC_LONG_ALIGN "\n" \
  465. PPC_LONG "1b,3b\n" \
  466. ".previous" \
  467. : "=r" (err) \
  468. : "r" (addr), "i" (-EFAULT), "0" (err))
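/*
 * The three inline-asm helpers above rely on the kernel's exception fixup
 * mechanism: label 1 marks the instruction that may fault, and the
 * __ex_table entry pairs it with the fixup at label 3, which loads -EFAULT
 * into err and branches back past the access.  __put_user_asmx() also
 * captures CR with mfcr so that the emulated stwcx./stdcx. can copy CR0
 * (including the store-conditional success bit) into regs->ccr.
 */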
  469. static void __kprobes set_cr0(struct pt_regs *regs, int rd)
  470. {
  471. long val = regs->gpr[rd];
  472. regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
  473. #ifdef __powerpc64__
  474. if (!(regs->msr & MSR_64BIT))
  475. val = (int) val;
  476. #endif
  477. if (val < 0)
  478. regs->ccr |= 0x80000000;
  479. else if (val > 0)
  480. regs->ccr |= 0x40000000;
  481. else
  482. regs->ccr |= 0x20000000;
  483. }
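/*
 * CR0 is the most-significant nibble of regs->ccr: LT = 0x80000000,
 * GT = 0x40000000, EQ = 0x20000000, SO = 0x10000000.  set_cr0() copies
 * XER[SO] into CR0[SO] and then sets exactly one of LT/GT/EQ from a signed
 * compare of the result against zero (using only the low 32 bits when
 * MSR_64BIT is clear).
 */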
  484. static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
  485. unsigned long val1, unsigned long val2,
  486. unsigned long carry_in)
  487. {
  488. unsigned long val = val1 + val2;
  489. if (carry_in)
  490. ++val;
  491. regs->gpr[rd] = val;
  492. #ifdef __powerpc64__
  493. if (!(regs->msr & MSR_64BIT)) {
  494. val = (unsigned int) val;
  495. val1 = (unsigned int) val1;
  496. }
  497. #endif
  498. if (val < val1 || (carry_in && val == val1))
  499. regs->xer |= XER_CA;
  500. else
  501. regs->xer &= ~XER_CA;
  502. }
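/*
 * The unsigned carry out is detected without widening: the sum wrapped
 * exactly when val < val1, or, when a carry-in was added, when val == val1
 * (which can only happen if val2 was all-ones).  In 32-bit mode val and
 * val1 are truncated first, so XER[CA] reflects the 32-bit carry as addc,
 * subfc and friends require.
 */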
  503. static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
  504. int crfld)
  505. {
  506. unsigned int crval, shift;
  507. crval = (regs->xer >> 31) & 1; /* get SO bit */
  508. if (v1 < v2)
  509. crval |= 8;
  510. else if (v1 > v2)
  511. crval |= 4;
  512. else
  513. crval |= 2;
  514. shift = (7 - crfld) * 4;
  515. regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
  516. }
  517. static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
  518. unsigned long v2, int crfld)
  519. {
  520. unsigned int crval, shift;
  521. crval = (regs->xer >> 31) & 1; /* get SO bit */
  522. if (v1 < v2)
  523. crval |= 8;
  524. else if (v1 > v2)
  525. crval |= 4;
  526. else
  527. crval |= 2;
  528. shift = (7 - crfld) * 4;
  529. regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
  530. }
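/*
 * Both compare helpers build a 4-bit field (LT = 8, GT = 4, EQ = 2, SO = 1,
 * with SO taken from XER) and insert it into condition register field
 * crfld; crfld 0 is the most-significant nibble of regs->ccr, hence the
 * (7 - crfld) * 4 shift.  E.g. "cmpw cr7,rA,rB" updates the low nibble.
 */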
  531. /*
  532. * Elements of 32-bit rotate and mask instructions.
  533. */
  534. #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
  535. ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
  536. #ifdef __powerpc64__
  537. #define MASK64_L(mb) (~0UL >> (mb))
  538. #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
  539. #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
  540. #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
  541. #else
  542. #define DATA32(x) (x)
  543. #endif
  544. #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
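/*
 * Worked example: "rlwinm ra,rs,0,16,31" (i.e. clrlwi ra,rs,16) reaches the
 * rlwinm case with the rb field holding SH = 0, mb = 16 and me = 31.
 * MASK32(16, 31) = (0xffffffff >> 16) + (-0x80000000 >> 31) + 1 = 0xffff,
 * and ROTATE(val, 0) is just val, so the emulation reduces to
 * gpr[ra] = DATA32(gpr[rs]) & 0xffff, clearing the upper bits as the
 * architecture requires.  For wrapped masks (mb > me) the "+ ((me) >= (mb))"
 * term is 0 and the sum yields a mask whose set bits wrap through the
 * high-order word, matching the 64-bit behaviour of the word rotates.
 */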
  545. /*
  546. * Emulate instructions that cause a transfer of control,
  547. * loads and stores, and a few other instructions.
  548. * Returns 1 if the step was emulated, 0 if not,
  549. * or -1 if the instruction is one that should not be stepped,
  550. * such as an rfid, or a mtmsrd that would clear MSR_RI.
  551. */
  552. int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
  553. {
  554. unsigned int opcode, ra, rb, rd, spr, u;
  555. unsigned long int imm;
  556. unsigned long int val, val2;
  557. unsigned long int ea;
  558. unsigned int cr, mb, me, sh;
  559. int err;
  560. unsigned long old_ra, val3;
  561. long ival;
  562. opcode = instr >> 26;
  563. switch (opcode) {
  564. case 16: /* bc */
  565. imm = (signed short)(instr & 0xfffc);
  566. if ((instr & 2) == 0)
  567. imm += regs->nip;
  568. regs->nip += 4;
  569. regs->nip = truncate_if_32bit(regs->msr, regs->nip);
  570. if (instr & 1)
  571. regs->link = regs->nip;
  572. if (branch_taken(instr, regs))
  573. regs->nip = truncate_if_32bit(regs->msr, imm);
  574. return 1;
  575. #ifdef CONFIG_PPC64
  576. case 17: /* sc */
  577. /*
  578. * N.B. this uses knowledge about how the syscall
  579. * entry code works. If that is changed, this will
  580. * need to be changed also.
  581. */
  582. if (regs->gpr[0] == 0x1ebe &&
  583. cpu_has_feature(CPU_FTR_REAL_LE)) {
  584. regs->msr ^= MSR_LE;
  585. goto instr_done;
  586. }
  587. regs->gpr[9] = regs->gpr[13];
  588. regs->gpr[10] = MSR_KERNEL;
  589. regs->gpr[11] = regs->nip + 4;
  590. regs->gpr[12] = regs->msr & MSR_MASK;
  591. regs->gpr[13] = (unsigned long) get_paca();
  592. regs->nip = (unsigned long) &system_call_common;
  593. regs->msr = MSR_KERNEL;
  594. return 1;
  595. #endif
  596. case 18: /* b */
  597. imm = instr & 0x03fffffc;
  598. if (imm & 0x02000000)
  599. imm -= 0x04000000;
  600. if ((instr & 2) == 0)
  601. imm += regs->nip;
  602. if (instr & 1)
  603. regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
  604. imm = truncate_if_32bit(regs->msr, imm);
  605. regs->nip = imm;
  606. return 1;
  607. case 19:
  608. switch ((instr >> 1) & 0x3ff) {
  609. case 16: /* bclr */
  610. case 528: /* bcctr */
  611. imm = (instr & 0x400)? regs->ctr: regs->link;
  612. regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
  613. imm = truncate_if_32bit(regs->msr, imm);
  614. if (instr & 1)
  615. regs->link = regs->nip;
  616. if (branch_taken(instr, regs))
  617. regs->nip = imm;
  618. return 1;
  619. case 18: /* rfid, scary */
  620. return -1;
  621. case 150: /* isync */
  622. isync();
  623. goto instr_done;
  624. case 33: /* crnor */
  625. case 129: /* crandc */
  626. case 193: /* crxor */
  627. case 225: /* crnand */
  628. case 257: /* crand */
  629. case 289: /* creqv */
  630. case 417: /* crorc */
  631. case 449: /* cror */
  632. ra = (instr >> 16) & 0x1f;
  633. rb = (instr >> 11) & 0x1f;
  634. rd = (instr >> 21) & 0x1f;
  635. ra = (regs->ccr >> (31 - ra)) & 1;
  636. rb = (regs->ccr >> (31 - rb)) & 1;
  637. val = (instr >> (6 + ra * 2 + rb)) & 1;
  638. regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
  639. (val << (31 - rd));
  640. goto instr_done;
  641. }
  642. break;
  643. case 31:
  644. switch ((instr >> 1) & 0x3ff) {
  645. case 598: /* sync */
  646. #ifdef __powerpc64__
  647. switch ((instr >> 21) & 3) {
  648. case 1: /* lwsync */
  649. asm volatile("lwsync" : : : "memory");
  650. goto instr_done;
  651. case 2: /* ptesync */
  652. asm volatile("ptesync" : : : "memory");
  653. goto instr_done;
  654. }
  655. #endif
  656. mb();
  657. goto instr_done;
  658. case 854: /* eieio */
  659. eieio();
  660. goto instr_done;
  661. }
  662. break;
  663. }
  664. /* Following cases refer to regs->gpr[], so we need all regs */
  665. if (!FULL_REGS(regs))
  666. return 0;
  667. rd = (instr >> 21) & 0x1f;
  668. ra = (instr >> 16) & 0x1f;
  669. rb = (instr >> 11) & 0x1f;
  670. switch (opcode) {
  671. case 7: /* mulli */
  672. regs->gpr[rd] = regs->gpr[ra] * (short) instr;
  673. goto instr_done;
  674. case 8: /* subfic */
  675. imm = (short) instr;
  676. add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
  677. goto instr_done;
  678. case 10: /* cmpli */
  679. imm = (unsigned short) instr;
  680. val = regs->gpr[ra];
  681. #ifdef __powerpc64__
  682. if ((rd & 1) == 0)
  683. val = (unsigned int) val;
  684. #endif
  685. do_cmp_unsigned(regs, val, imm, rd >> 2);
  686. goto instr_done;
  687. case 11: /* cmpi */
  688. imm = (short) instr;
  689. val = regs->gpr[ra];
  690. #ifdef __powerpc64__
  691. if ((rd & 1) == 0)
  692. val = (int) val;
  693. #endif
  694. do_cmp_signed(regs, val, imm, rd >> 2);
  695. goto instr_done;
  696. case 12: /* addic */
  697. imm = (short) instr;
  698. add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
  699. goto instr_done;
  700. case 13: /* addic. */
  701. imm = (short) instr;
  702. add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
  703. set_cr0(regs, rd);
  704. goto instr_done;
  705. case 14: /* addi */
  706. imm = (short) instr;
  707. if (ra)
  708. imm += regs->gpr[ra];
  709. regs->gpr[rd] = imm;
  710. goto instr_done;
  711. case 15: /* addis */
  712. imm = ((short) instr) << 16;
  713. if (ra)
  714. imm += regs->gpr[ra];
  715. regs->gpr[rd] = imm;
  716. goto instr_done;
  717. case 20: /* rlwimi */
  718. mb = (instr >> 6) & 0x1f;
  719. me = (instr >> 1) & 0x1f;
  720. val = DATA32(regs->gpr[rd]);
  721. imm = MASK32(mb, me);
  722. regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
  723. goto logical_done;
  724. case 21: /* rlwinm */
  725. mb = (instr >> 6) & 0x1f;
  726. me = (instr >> 1) & 0x1f;
  727. val = DATA32(regs->gpr[rd]);
  728. regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
  729. goto logical_done;
  730. case 23: /* rlwnm */
  731. mb = (instr >> 6) & 0x1f;
  732. me = (instr >> 1) & 0x1f;
  733. rb = regs->gpr[rb] & 0x1f;
  734. val = DATA32(regs->gpr[rd]);
  735. regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
  736. goto logical_done;
  737. case 24: /* ori */
  738. imm = (unsigned short) instr;
  739. regs->gpr[ra] = regs->gpr[rd] | imm;
  740. goto instr_done;
  741. case 25: /* oris */
  742. imm = (unsigned short) instr;
  743. regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
  744. goto instr_done;
  745. case 26: /* xori */
  746. imm = (unsigned short) instr;
  747. regs->gpr[ra] = regs->gpr[rd] ^ imm;
  748. goto instr_done;
  749. case 27: /* xoris */
  750. imm = (unsigned short) instr;
  751. regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
  752. goto instr_done;
  753. case 28: /* andi. */
  754. imm = (unsigned short) instr;
  755. regs->gpr[ra] = regs->gpr[rd] & imm;
  756. set_cr0(regs, ra);
  757. goto instr_done;
  758. case 29: /* andis. */
  759. imm = (unsigned short) instr;
  760. regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
  761. set_cr0(regs, ra);
  762. goto instr_done;
  763. #ifdef __powerpc64__
  764. case 30: /* rld* */
  765. mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
  766. val = regs->gpr[rd];
  767. if ((instr & 0x10) == 0) {
  768. sh = rb | ((instr & 2) << 4);
  769. val = ROTATE(val, sh);
  770. switch ((instr >> 2) & 3) {
  771. case 0: /* rldicl */
  772. regs->gpr[ra] = val & MASK64_L(mb);
  773. goto logical_done;
  774. case 1: /* rldicr */
  775. regs->gpr[ra] = val & MASK64_R(mb);
  776. goto logical_done;
  777. case 2: /* rldic */
  778. regs->gpr[ra] = val & MASK64(mb, 63 - sh);
  779. goto logical_done;
  780. case 3: /* rldimi */
  781. imm = MASK64(mb, 63 - sh);
  782. regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
  783. (val & imm);
  784. goto logical_done;
  785. }
  786. } else {
  787. sh = regs->gpr[rb] & 0x3f;
  788. val = ROTATE(val, sh);
  789. switch ((instr >> 1) & 7) {
  790. case 0: /* rldcl */
  791. regs->gpr[ra] = val & MASK64_L(mb);
  792. goto logical_done;
  793. case 1: /* rldcr */
  794. regs->gpr[ra] = val & MASK64_R(mb);
  795. goto logical_done;
  796. }
  797. }
  798. #endif
  799. case 31:
  800. switch ((instr >> 1) & 0x3ff) {
  801. case 83: /* mfmsr */
  802. if (regs->msr & MSR_PR)
  803. break;
  804. regs->gpr[rd] = regs->msr & MSR_MASK;
  805. goto instr_done;
  806. case 146: /* mtmsr */
  807. if (regs->msr & MSR_PR)
  808. break;
  809. imm = regs->gpr[rd];
  810. if ((imm & MSR_RI) == 0)
  811. /* can't step mtmsr that would clear MSR_RI */
  812. return -1;
  813. regs->msr = imm;
  814. goto instr_done;
  815. #ifdef CONFIG_PPC64
  816. case 178: /* mtmsrd */
  817. /* only MSR_EE and MSR_RI get changed if bit 15 set */
  818. /* mtmsrd doesn't change MSR_HV and MSR_ME */
  819. if (regs->msr & MSR_PR)
  820. break;
  821. imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
  822. imm = (regs->msr & MSR_MASK & ~imm)
  823. | (regs->gpr[rd] & imm);
  824. if ((imm & MSR_RI) == 0)
  825. /* can't step mtmsrd that would clear MSR_RI */
  826. return -1;
  827. regs->msr = imm;
  828. goto instr_done;
  829. #endif
  830. case 19: /* mfcr */
  831. regs->gpr[rd] = regs->ccr;
  832. regs->gpr[rd] &= 0xffffffffUL;
  833. goto instr_done;
  834. case 144: /* mtcrf */
  835. imm = 0xf0000000UL;
  836. val = regs->gpr[rd];
  837. for (sh = 0; sh < 8; ++sh) {
  838. if (instr & (0x80000 >> sh))
  839. regs->ccr = (regs->ccr & ~imm) |
  840. (val & imm);
  841. imm >>= 4;
  842. }
  843. goto instr_done;
  844. case 339: /* mfspr */
  845. spr = (instr >> 11) & 0x3ff;
  846. switch (spr) {
  847. case 0x20: /* mfxer */
  848. regs->gpr[rd] = regs->xer;
  849. regs->gpr[rd] &= 0xffffffffUL;
  850. goto instr_done;
  851. case 0x100: /* mflr */
  852. regs->gpr[rd] = regs->link;
  853. goto instr_done;
  854. case 0x120: /* mfctr */
  855. regs->gpr[rd] = regs->ctr;
  856. goto instr_done;
  857. }
  858. break;
  859. case 467: /* mtspr */
  860. spr = (instr >> 11) & 0x3ff;
  861. switch (spr) {
  862. case 0x20: /* mtxer */
  863. regs->xer = (regs->gpr[rd] & 0xffffffffUL);
  864. goto instr_done;
  865. case 0x100: /* mtlr */
  866. regs->link = regs->gpr[rd];
  867. goto instr_done;
  868. case 0x120: /* mtctr */
  869. regs->ctr = regs->gpr[rd];
  870. goto instr_done;
  871. }
  872. break;
  873. /*
  874. * Compare instructions
  875. */
  876. case 0: /* cmp */
  877. val = regs->gpr[ra];
  878. val2 = regs->gpr[rb];
  879. #ifdef __powerpc64__
  880. if ((rd & 1) == 0) {
  881. /* word (32-bit) compare */
  882. val = (int) val;
  883. val2 = (int) val2;
  884. }
  885. #endif
  886. do_cmp_signed(regs, val, val2, rd >> 2);
  887. goto instr_done;
  888. case 32: /* cmpl */
  889. val = regs->gpr[ra];
  890. val2 = regs->gpr[rb];
  891. #ifdef __powerpc64__
  892. if ((rd & 1) == 0) {
  893. /* word (32-bit) compare */
  894. val = (unsigned int) val;
  895. val2 = (unsigned int) val2;
  896. }
  897. #endif
  898. do_cmp_unsigned(regs, val, val2, rd >> 2);
  899. goto instr_done;
  900. /*
  901. * Arithmetic instructions
  902. */
  903. case 8: /* subfc */
  904. add_with_carry(regs, rd, ~regs->gpr[ra],
  905. regs->gpr[rb], 1);
  906. goto arith_done;
  907. #ifdef __powerpc64__
  908. case 9: /* mulhdu */
  909. asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
  910. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  911. goto arith_done;
  912. #endif
  913. case 10: /* addc */
  914. add_with_carry(regs, rd, regs->gpr[ra],
  915. regs->gpr[rb], 0);
  916. goto arith_done;
  917. case 11: /* mulhwu */
  918. asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
  919. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  920. goto arith_done;
  921. case 40: /* subf */
  922. regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
  923. goto arith_done;
  924. #ifdef __powerpc64__
  925. case 73: /* mulhd */
  926. asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
  927. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  928. goto arith_done;
  929. #endif
  930. case 75: /* mulhw */
  931. asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
  932. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  933. goto arith_done;
  934. case 104: /* neg */
  935. regs->gpr[rd] = -regs->gpr[ra];
  936. goto arith_done;
  937. case 136: /* subfe */
  938. add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
  939. regs->xer & XER_CA);
  940. goto arith_done;
  941. case 138: /* adde */
  942. add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
  943. regs->xer & XER_CA);
  944. goto arith_done;
  945. case 200: /* subfze */
  946. add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
  947. regs->xer & XER_CA);
  948. goto arith_done;
  949. case 202: /* addze */
  950. add_with_carry(regs, rd, regs->gpr[ra], 0L,
  951. regs->xer & XER_CA);
  952. goto arith_done;
  953. case 232: /* subfme */
  954. add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
  955. regs->xer & XER_CA);
  956. goto arith_done;
  957. #ifdef __powerpc64__
  958. case 233: /* mulld */
  959. regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
  960. goto arith_done;
  961. #endif
  962. case 234: /* addme */
  963. add_with_carry(regs, rd, regs->gpr[ra], -1L,
  964. regs->xer & XER_CA);
  965. goto arith_done;
  966. case 235: /* mullw */
  967. regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
  968. (unsigned int) regs->gpr[rb];
  969. goto arith_done;
  970. case 266: /* add */
  971. regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
  972. goto arith_done;
  973. #ifdef __powerpc64__
  974. case 457: /* divdu */
  975. regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
  976. goto arith_done;
  977. #endif
  978. case 459: /* divwu */
  979. regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
  980. (unsigned int) regs->gpr[rb];
  981. goto arith_done;
  982. #ifdef __powerpc64__
  983. case 489: /* divd */
  984. regs->gpr[rd] = (long int) regs->gpr[ra] /
  985. (long int) regs->gpr[rb];
  986. goto arith_done;
  987. #endif
  988. case 491: /* divw */
  989. regs->gpr[rd] = (int) regs->gpr[ra] /
  990. (int) regs->gpr[rb];
  991. goto arith_done;
  992. /*
  993. * Logical instructions
  994. */
  995. case 26: /* cntlzw */
  996. asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
  997. "r" (regs->gpr[rd]));
  998. goto logical_done;
  999. #ifdef __powerpc64__
  1000. case 58: /* cntlzd */
  1001. asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
  1002. "r" (regs->gpr[rd]));
  1003. goto logical_done;
  1004. #endif
  1005. case 28: /* and */
  1006. regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
  1007. goto logical_done;
  1008. case 60: /* andc */
  1009. regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
  1010. goto logical_done;
  1011. case 124: /* nor */
  1012. regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
  1013. goto logical_done;
  1014. case 284: /* eqv */
  1015. regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
  1016. goto logical_done;
  1017. case 316: /* xor */
  1018. regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
  1019. goto logical_done;
  1020. case 412: /* orc */
  1021. regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
  1022. goto logical_done;
  1023. case 444: /* or */
  1024. regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
  1025. goto logical_done;
  1026. case 476: /* nand */
  1027. regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
  1028. goto logical_done;
  1029. case 922: /* extsh */
  1030. regs->gpr[ra] = (signed short) regs->gpr[rd];
  1031. goto logical_done;
  1032. case 954: /* extsb */
  1033. regs->gpr[ra] = (signed char) regs->gpr[rd];
  1034. goto logical_done;
  1035. #ifdef __powerpc64__
  1036. case 986: /* extsw */
  1037. regs->gpr[ra] = (signed int) regs->gpr[rd];
  1038. goto logical_done;
  1039. #endif
  1040. /*
  1041. * Shift instructions
  1042. */
  1043. case 24: /* slw */
  1044. sh = regs->gpr[rb] & 0x3f;
  1045. if (sh < 32)
  1046. regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
  1047. else
  1048. regs->gpr[ra] = 0;
  1049. goto logical_done;
  1050. case 536: /* srw */
  1051. sh = regs->gpr[rb] & 0x3f;
  1052. if (sh < 32)
  1053. regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
  1054. else
  1055. regs->gpr[ra] = 0;
  1056. goto logical_done;
  1057. case 792: /* sraw */
  1058. sh = regs->gpr[rb] & 0x3f;
  1059. ival = (signed int) regs->gpr[rd];
  1060. regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
  1061. if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
  1062. regs->xer |= XER_CA;
  1063. else
  1064. regs->xer &= ~XER_CA;
  1065. goto logical_done;
  1066. case 824: /* srawi */
  1067. sh = rb;
  1068. ival = (signed int) regs->gpr[rd];
  1069. regs->gpr[ra] = ival >> sh;
  1070. if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
  1071. regs->xer |= XER_CA;
  1072. else
  1073. regs->xer &= ~XER_CA;
  1074. goto logical_done;
  1075. #ifdef __powerpc64__
  1076. case 27: /* sld */
  1077. sh = regs->gpr[rb] & 0x7f;
  1078. if (sh < 64)
  1079. regs->gpr[ra] = regs->gpr[rd] << sh;
  1080. else
  1081. regs->gpr[ra] = 0;
  1082. goto logical_done;
  1083. case 539: /* srd */
  1084. sh = regs->gpr[rb] & 0x7f;
  1085. if (sh < 64)
  1086. regs->gpr[ra] = regs->gpr[rd] >> sh;
  1087. else
  1088. regs->gpr[ra] = 0;
  1089. goto logical_done;
  1090. case 794: /* srad */
  1091. sh = regs->gpr[rb] & 0x7f;
  1092. ival = (signed long int) regs->gpr[rd];
  1093. regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
  1094. if (ival < 0 && (sh >= 64 || (ival & ((1UL << sh) - 1)) != 0))
  1095. regs->xer |= XER_CA;
  1096. else
  1097. regs->xer &= ~XER_CA;
  1098. goto logical_done;
  1099. case 826: /* sradi with sh_5 = 0 */
  1100. case 827: /* sradi with sh_5 = 1 */
  1101. sh = rb | ((instr & 2) << 4);
  1102. ival = (signed long int) regs->gpr[rd];
  1103. regs->gpr[ra] = ival >> sh;
  1104. if (ival < 0 && (ival & ((1UL << sh) - 1)) != 0)
  1105. regs->xer |= XER_CA;
  1106. else
  1107. regs->xer &= ~XER_CA;
  1108. goto logical_done;
  1109. #endif /* __powerpc64__ */
  1110. /*
  1111. * Cache instructions
  1112. */
  1113. case 54: /* dcbst */
  1114. ea = xform_ea(instr, regs, 0);
  1115. if (!address_ok(regs, ea, 8))
  1116. return 0;
  1117. err = 0;
  1118. __cacheop_user_asmx(ea, err, "dcbst");
  1119. if (err)
  1120. return 0;
  1121. goto instr_done;
  1122. case 86: /* dcbf */
  1123. ea = xform_ea(instr, regs, 0);
  1124. if (!address_ok(regs, ea, 8))
  1125. return 0;
  1126. err = 0;
  1127. __cacheop_user_asmx(ea, err, "dcbf");
  1128. if (err)
  1129. return 0;
  1130. goto instr_done;
  1131. case 246: /* dcbtst */
  1132. if (rd == 0) {
  1133. ea = xform_ea(instr, regs, 0);
  1134. prefetchw((void *) ea);
  1135. }
  1136. goto instr_done;
  1137. case 278: /* dcbt */
  1138. if (rd == 0) {
  1139. ea = xform_ea(instr, regs, 0);
  1140. prefetch((void *) ea);
  1141. }
  1142. goto instr_done;
  1143. }
  1144. break;
  1145. }
  1146. /*
  1147. * Following cases are for loads and stores, so bail out
  1148. * if we're in little-endian mode.
  1149. */
  1150. if (regs->msr & MSR_LE)
  1151. return 0;
  1152. /*
  1153. * Save register RA in case it's an update form load or store
  1154. * and the access faults.
  1155. */
  1156. old_ra = regs->gpr[ra];
  1157. switch (opcode) {
  1158. case 31:
  1159. u = instr & 0x40;
  1160. switch ((instr >> 1) & 0x3ff) {
  1161. case 20: /* lwarx */
  1162. ea = xform_ea(instr, regs, 0);
  1163. if (ea & 3)
  1164. break; /* can't handle misaligned */
  1165. err = -EFAULT;
  1166. if (!address_ok(regs, ea, 4))
  1167. goto ldst_done;
  1168. err = 0;
  1169. __get_user_asmx(val, ea, err, "lwarx");
  1170. if (!err)
  1171. regs->gpr[rd] = val;
  1172. goto ldst_done;
  1173. case 150: /* stwcx. */
  1174. ea = xform_ea(instr, regs, 0);
  1175. if (ea & 3)
  1176. break; /* can't handle misaligned */
  1177. err = -EFAULT;
  1178. if (!address_ok(regs, ea, 4))
  1179. goto ldst_done;
  1180. err = 0;
  1181. __put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
  1182. if (!err)
  1183. regs->ccr = (regs->ccr & 0x0fffffff) |
  1184. (cr & 0xe0000000) |
  1185. ((regs->xer >> 3) & 0x10000000);
  1186. goto ldst_done;
  1187. #ifdef __powerpc64__
  1188. case 84: /* ldarx */
  1189. ea = xform_ea(instr, regs, 0);
  1190. if (ea & 7)
  1191. break; /* can't handle misaligned */
  1192. err = -EFAULT;
  1193. if (!address_ok(regs, ea, 8))
  1194. goto ldst_done;
  1195. err = 0;
  1196. __get_user_asmx(val, ea, err, "ldarx");
  1197. if (!err)
  1198. regs->gpr[rd] = val;
  1199. goto ldst_done;
  1200. case 214: /* stdcx. */
  1201. ea = xform_ea(instr, regs, 0);
  1202. if (ea & 7)
  1203. break; /* can't handle misaligned */
  1204. err = -EFAULT;
  1205. if (!address_ok(regs, ea, 8))
  1206. goto ldst_done;
  1207. err = 0;
  1208. __put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
  1209. if (!err)
  1210. regs->ccr = (regs->ccr & 0x0fffffff) |
  1211. (cr & 0xe0000000) |
  1212. ((regs->xer >> 3) & 0x10000000);
  1213. goto ldst_done;
  1214. case 21: /* ldx */
  1215. case 53: /* ldux */
  1216. err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
  1217. 8, regs);
  1218. goto ldst_done;
  1219. #endif
  1220. case 23: /* lwzx */
  1221. case 55: /* lwzux */
  1222. err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
  1223. 4, regs);
  1224. goto ldst_done;
  1225. case 87: /* lbzx */
  1226. case 119: /* lbzux */
  1227. err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
  1228. 1, regs);
  1229. goto ldst_done;
  1230. #ifdef CONFIG_ALTIVEC
  1231. case 103: /* lvx */
  1232. case 359: /* lvxl */
  1233. if (!(regs->msr & MSR_VEC))
  1234. break;
  1235. ea = xform_ea(instr, regs, 0);
  1236. err = do_vec_load(rd, do_lvx, ea, regs);
  1237. goto ldst_done;
  1238. case 231: /* stvx */
  1239. case 487: /* stvxl */
  1240. if (!(regs->msr & MSR_VEC))
  1241. break;
  1242. ea = xform_ea(instr, regs, 0);
  1243. err = do_vec_store(rd, do_stvx, ea, regs);
  1244. goto ldst_done;
  1245. #endif /* CONFIG_ALTIVEC */
  1246. #ifdef __powerpc64__
  1247. case 149: /* stdx */
  1248. case 181: /* stdux */
  1249. val = regs->gpr[rd];
  1250. err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
  1251. goto ldst_done;
  1252. #endif
  1253. case 151: /* stwx */
  1254. case 183: /* stwux */
  1255. val = regs->gpr[rd];
  1256. err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
  1257. goto ldst_done;
  1258. case 215: /* stbx */
  1259. case 247: /* stbux */
  1260. val = regs->gpr[rd];
  1261. err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
  1262. goto ldst_done;
  1263. case 279: /* lhzx */
  1264. case 311: /* lhzux */
  1265. err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
  1266. 2, regs);
  1267. goto ldst_done;
  1268. #ifdef __powerpc64__
  1269. case 341: /* lwax */
  1270. case 373: /* lwaux */
  1271. err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
  1272. 4, regs);
  1273. if (!err)
  1274. regs->gpr[rd] = (signed int) regs->gpr[rd];
  1275. goto ldst_done;
  1276. #endif
  1277. case 343: /* lhax */
  1278. case 375: /* lhaux */
  1279. err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
  1280. 2, regs);
  1281. if (!err)
  1282. regs->gpr[rd] = (signed short) regs->gpr[rd];
  1283. goto ldst_done;
  1284. case 407: /* sthx */
  1285. case 439: /* sthux */
  1286. val = regs->gpr[rd];
  1287. err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
  1288. goto ldst_done;
  1289. #ifdef __powerpc64__
  1290. case 532: /* ldbrx */
  1291. err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
  1292. if (!err)
  1293. regs->gpr[rd] = byterev_8(val);
  1294. goto ldst_done;
  1295. #endif
  1296. case 534: /* lwbrx */
  1297. err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
  1298. if (!err)
  1299. regs->gpr[rd] = byterev_4(val);
  1300. goto ldst_done;
  1301. #ifdef CONFIG_PPC_FPU
  1302. case 535: /* lfsx */
  1303. case 567: /* lfsux */
  1304. if (!(regs->msr & MSR_FP))
  1305. break;
  1306. ea = xform_ea(instr, regs, u);
  1307. err = do_fp_load(rd, do_lfs, ea, 4, regs);
  1308. goto ldst_done;
  1309. case 599: /* lfdx */
  1310. case 631: /* lfdux */
  1311. if (!(regs->msr & MSR_FP))
  1312. break;
  1313. ea = xform_ea(instr, regs, u);
  1314. err = do_fp_load(rd, do_lfd, ea, 8, regs);
  1315. goto ldst_done;
  1316. case 663: /* stfsx */
  1317. case 695: /* stfsux */
  1318. if (!(regs->msr & MSR_FP))
  1319. break;
  1320. ea = xform_ea(instr, regs, u);
  1321. err = do_fp_store(rd, do_stfs, ea, 4, regs);
  1322. goto ldst_done;
  1323. case 727: /* stfdx */
  1324. case 759: /* stfdux */
  1325. if (!(regs->msr & MSR_FP))
  1326. break;
  1327. ea = xform_ea(instr, regs, u);
  1328. err = do_fp_store(rd, do_stfd, ea, 8, regs);
  1329. goto ldst_done;
  1330. #endif
  1331. #ifdef __powerpc64__
  1332. case 660: /* stdbrx */
  1333. val = byterev_8(regs->gpr[rd]);
  1334. err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
  1335. goto ldst_done;
  1336. #endif
  1337. case 662: /* stwbrx */
  1338. val = byterev_4(regs->gpr[rd]);
  1339. err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
  1340. goto ldst_done;
  1341. case 790: /* lhbrx */
  1342. err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
  1343. if (!err)
  1344. regs->gpr[rd] = byterev_2(val);
  1345. goto ldst_done;
  1346. case 918: /* sthbrx */
  1347. val = byterev_2(regs->gpr[rd]);
  1348. err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
  1349. goto ldst_done;
  1350. #ifdef CONFIG_VSX
  1351. case 844: /* lxvd2x */
  1352. case 876: /* lxvd2ux */
  1353. if (!(regs->msr & MSR_VSX))
  1354. break;
  1355. rd |= (instr & 1) << 5;
  1356. ea = xform_ea(instr, regs, u);
  1357. err = do_vsx_load(rd, do_lxvd2x, ea, regs);
  1358. goto ldst_done;
  1359. case 972: /* stxvd2x */
  1360. case 1004: /* stxvd2ux */
  1361. if (!(regs->msr & MSR_VSX))
  1362. break;
  1363. rd |= (instr & 1) << 5;
  1364. ea = xform_ea(instr, regs, u);
  1365. err = do_vsx_store(rd, do_stxvd2x, ea, regs);
  1366. goto ldst_done;
  1367. #endif /* CONFIG_VSX */
  1368. }
  1369. break;
  1370. case 32: /* lwz */
  1371. case 33: /* lwzu */
  1372. err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
  1373. goto ldst_done;
  1374. case 34: /* lbz */
  1375. case 35: /* lbzu */
  1376. err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
  1377. goto ldst_done;
  1378. case 36: /* stw */
  1379. val = regs->gpr[rd];
  1380. err = write_mem(val, dform_ea(instr, regs), 4, regs);
  1381. goto ldst_done;
  1382. case 37: /* stwu */
  1383. val = regs->gpr[rd];
  1384. val3 = dform_ea(instr, regs);
  1385. /*
  1386. * On PPC32, stwu with r1 is always used to change the stack pointer, so
  1387. * this emulated store could corrupt the exception frame.  We therefore
  1388. * provide an exception frame trampoline, pushed below the kprobed
  1389. * function's stack, and only update gpr[1] here rather than emulating
  1390. * the store itself.  The real store is performed safely in the exception
  1391. * return code, which checks this flag.
  1392. */
  1393. if ((ra == 1) && !(regs->msr & MSR_PR)
  1394. && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
  1395. #ifdef CONFIG_PPC32
  1396. /*
  1397. * Check whether this store would overflow the kernel stack
  1398. */
  1399. if (val3 - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
  1400. printk(KERN_CRIT "Can't kprobe this since the kernel stack would overflow.\n");
  1401. err = -EINVAL;
  1402. break;
  1403. }
  1404. #endif /* CONFIG_PPC32 */
  1405. /*
  1406. * Check if the flag is already set, since that would mean we'd
  1407. * lose the previously saved value.
  1408. */
  1409. WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
  1410. set_thread_flag(TIF_EMULATE_STACK_STORE);
  1411. err = 0;
  1412. } else
  1413. err = write_mem(val, val3, 4, regs);
  1414. goto ldst_done;
  1415. case 38: /* stb */
  1416. case 39: /* stbu */
  1417. val = regs->gpr[rd];
  1418. err = write_mem(val, dform_ea(instr, regs), 1, regs);
  1419. goto ldst_done;
  1420. case 40: /* lhz */
  1421. case 41: /* lhzu */
  1422. err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
  1423. goto ldst_done;
  1424. case 42: /* lha */
  1425. case 43: /* lhau */
  1426. err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
  1427. if (!err)
  1428. regs->gpr[rd] = (signed short) regs->gpr[rd];
  1429. goto ldst_done;
  1430. case 44: /* sth */
  1431. case 45: /* sthu */
  1432. val = regs->gpr[rd];
  1433. err = write_mem(val, dform_ea(instr, regs), 2, regs);
  1434. goto ldst_done;
  1435. case 46: /* lmw */
  1436. ra = (instr >> 16) & 0x1f;
  1437. if (ra >= rd)
  1438. break; /* invalid form, ra in range to load */
  1439. ea = dform_ea(instr, regs);
  1440. do {
  1441. err = read_mem(&regs->gpr[rd], ea, 4, regs);
  1442. if (err)
  1443. return 0;
  1444. ea += 4;
  1445. } while (++rd < 32);
  1446. goto instr_done;
  1447. case 47: /* stmw */
  1448. ea = dform_ea(instr, regs);
  1449. do {
  1450. err = write_mem(regs->gpr[rd], ea, 4, regs);
  1451. if (err)
  1452. return 0;
  1453. ea += 4;
  1454. } while (++rd < 32);
  1455. goto instr_done;
  1456. #ifdef CONFIG_PPC_FPU
  1457. case 48: /* lfs */
  1458. case 49: /* lfsu */
  1459. if (!(regs->msr & MSR_FP))
  1460. break;
  1461. ea = dform_ea(instr, regs);
  1462. err = do_fp_load(rd, do_lfs, ea, 4, regs);
  1463. goto ldst_done;
  1464. case 50: /* lfd */
  1465. case 51: /* lfdu */
  1466. if (!(regs->msr & MSR_FP))
  1467. break;
  1468. ea = dform_ea(instr, regs);
  1469. err = do_fp_load(rd, do_lfd, ea, 8, regs);
  1470. goto ldst_done;
  1471. case 52: /* stfs */
  1472. case 53: /* stfsu */
  1473. if (!(regs->msr & MSR_FP))
  1474. break;
  1475. ea = dform_ea(instr, regs);
  1476. err = do_fp_store(rd, do_stfs, ea, 4, regs);
  1477. goto ldst_done;
  1478. case 54: /* stfd */
  1479. case 55: /* stfdu */
  1480. if (!(regs->msr & MSR_FP))
  1481. break;
  1482. ea = dform_ea(instr, regs);
  1483. err = do_fp_store(rd, do_stfd, ea, 8, regs);
  1484. goto ldst_done;
  1485. #endif
  1486. #ifdef __powerpc64__
  1487. case 58: /* ld[u], lwa */
  1488. switch (instr & 3) {
  1489. case 0: /* ld */
  1490. err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
  1491. 8, regs);
  1492. goto ldst_done;
  1493. case 1: /* ldu */
  1494. err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
  1495. 8, regs);
  1496. goto ldst_done;
  1497. case 2: /* lwa */
  1498. err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
  1499. 4, regs);
  1500. if (!err)
  1501. regs->gpr[rd] = (signed int) regs->gpr[rd];
  1502. goto ldst_done;
  1503. }
  1504. break;
  1505. case 62: /* std[u] */
  1506. val = regs->gpr[rd];
  1507. switch (instr & 3) {
  1508. case 0: /* std */
  1509. err = write_mem(val, dsform_ea(instr, regs), 8, regs);
  1510. goto ldst_done;
  1511. case 1: /* stdu */
  1512. err = write_mem(val, dsform_ea(instr, regs), 8, regs);
  1513. goto ldst_done;
  1514. }
  1515. break;
  1516. #endif /* __powerpc64__ */
  1517. }
  1518. err = -EINVAL;
  1519. ldst_done:
  1520. if (err) {
  1521. regs->gpr[ra] = old_ra;
  1522. return 0; /* invoke DSI if -EFAULT? */
  1523. }
  1524. instr_done:
  1525. regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
  1526. return 1;
  1527. logical_done:
  1528. if (instr & 1)
  1529. set_cr0(regs, ra);
  1530. goto instr_done;
  1531. arith_done:
  1532. if (instr & 1)
  1533. set_cr0(regs, rd);
  1534. goto instr_done;
  1535. }
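/*
 * Typical use (a sketch, not part of this file): a caller such as the
 * kprobes or xmon single-step code reads the instruction word at regs->nip
 * and calls emulate_step(regs, instr).  A return of 1 means regs (including
 * nip) already reflect the completed instruction, 0 means the caller must
 * execute or single-step it for real, and -1 means the instruction must
 * not be stepped at all.
 */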