ptrace.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354
  1. /*
  2. * Ptrace user space interface.
  3. *
  4. * Copyright IBM Corp. 1999, 2010
  5. * Author(s): Denis Joseph Barrow
  6. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/sched.h>
  10. #include <linux/mm.h>
  11. #include <linux/smp.h>
  12. #include <linux/errno.h>
  13. #include <linux/ptrace.h>
  14. #include <linux/user.h>
  15. #include <linux/security.h>
  16. #include <linux/audit.h>
  17. #include <linux/signal.h>
  18. #include <linux/elf.h>
  19. #include <linux/regset.h>
  20. #include <linux/tracehook.h>
  21. #include <linux/seccomp.h>
  22. #include <linux/compat.h>
  23. #include <trace/syscall.h>
  24. #include <asm/segment.h>
  25. #include <asm/page.h>
  26. #include <asm/pgtable.h>
  27. #include <asm/pgalloc.h>
  28. #include <asm/uaccess.h>
  29. #include <asm/unistd.h>
  30. #include <asm/switch_to.h>
  31. #include "entry.h"
  32. #ifdef CONFIG_COMPAT
  33. #include "compat_ptrace.h"
  34. #endif
  35. #define CREATE_TRACE_POINTS
  36. #include <trace/events/syscalls.h>
/*
 * Identifiers for the user_regset flavours this file exports
 * (presumably indices into the regset arrays defined further down -
 * the arrays themselves are outside this chunk).
 */
enum s390_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_LAST_BREAK,
	REGSET_TDB,
	REGSET_SYSTEM_CALL,
	REGSET_GENERAL_EXTENDED,
};
/*
 * Recompute the transactional-execution related control registers
 * (cr0/cr2) and the PER control registers (cr9-cr11) for @task from
 * its per_flags, its user specified PER set and TIF_SINGLE_STEP, and
 * load the hardware registers only when the values actually changed.
 *
 * NOTE(review): the visible callers (user_{enable,disable}_single_step
 * below) only call this for task == current; __ctl_load changes the
 * registers of the executing CPU, so confirm before calling it for a
 * remote task.
 */
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;

#ifdef CONFIG_64BIT
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		unsigned long cr[3], cr_new[3];

		__ctl_store(cr, 0, 2);
		/* cr1 is carried over unchanged. */
		cr_new[1] = cr[1];
		/* Set or clear transaction execution TXC bit 8. */
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr_new[0] = cr[0] & ~(1UL << 55);
		else
			cr_new[0] = cr[0] | (1UL << 55);
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr_new[2] = cr[2] & ~3UL;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr_new[2] |= 1UL;
			else
				cr_new[2] |= 2UL;
		}
		/* Avoid the (expensive) control register load if nothing changed. */
		if (memcmp(&cr_new, &cr, sizeof(cr)))
			__ctl_load(cr_new, 0, 2);
	}
#endif
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
		new.control |= PER_EVENT_IFETCH;
#ifdef CONFIG_64BIT
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
#endif
		/* Single stepping watches the whole address range. */
		new.start = 0;
		new.end = PSW_ADDR_INSN;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		/* No PER event selected - switch PER off completely. */
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
  97. void user_enable_single_step(struct task_struct *task)
  98. {
  99. set_tsk_thread_flag(task, TIF_SINGLE_STEP);
  100. if (task == current)
  101. update_cr_regs(task);
  102. }
  103. void user_disable_single_step(struct task_struct *task)
  104. {
  105. clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
  106. if (task == current)
  107. update_cr_regs(task);
  108. }
  109. /*
  110. * Called by kernel/ptrace.c when detaching..
  111. *
  112. * Clear all debugging related fields.
  113. */
  114. void ptrace_disable(struct task_struct *task)
  115. {
  116. memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
  117. memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
  118. clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
  119. clear_tsk_thread_flag(task, TIF_PER_TRAP);
  120. task->thread.per_flags = 0;
  121. }
  122. #ifndef CONFIG_64BIT
  123. # define __ADDR_MASK 3
  124. #else
  125. # define __ADDR_MASK 7
  126. #endif
/*
 * Read one word from the pseudo per_struct_kernel area of the user
 * struct. @addr is the byte offset into that structure; the NULL
 * "dummy" pointer is never dereferenced, it only serves to compute
 * field offsets. While single stepping is in effect the active per
 * set is the kernel's single-step setup, so cr9-cr11 and the
 * single-step bit report that instead of the user specified values.
 * Unknown offsets read as zero.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 *
 * @addr is the byte offset into struct user; the NULL "dummy"
 * pointer only serves to compute field offsets and is never
 * dereferenced. Callers (peek_user / the regset code) are expected
 * to have validated alignment and range.
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}
	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		/* fpc is a 32 bit value - left justify it in the word. */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp <<= BITS_PER_LONG - 32;
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);
	} else
		tmp = 0;

	return tmp;
}
  234. static int
  235. peek_user(struct task_struct *child, addr_t addr, addr_t data)
  236. {
  237. addr_t tmp, mask;
  238. /*
  239. * Stupid gdb peeks/pokes the access registers in 64 bit with
  240. * an alignment of 4. Programmers from hell...
  241. */
  242. mask = __ADDR_MASK;
  243. #ifdef CONFIG_64BIT
  244. if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
  245. addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
  246. mask = 3;
  247. #endif
  248. if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
  249. return -EIO;
  250. tmp = __peek_user(child, addr);
  251. return put_user(tmp, (addr_t __user *) data);
  252. }
/*
 * Write one word into the pseudo per_struct_kernel area of the user
 * struct. @addr is the byte offset into that structure; the NULL
 * "dummy" pointer only serves to compute field offsets.
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register needs to get checked for validity.
 *
 * Returns 0 on success, -EINVAL for an invalid psw mask or fpc
 * value. Writes into the padding hole and unknown offsets succeed
 * silently without storing anything.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			/* Only the user changeable bits may differ. */
			if ((data & ~mask) != PSW_USER_BITS)
				return -EINVAL;
			/* EA without BA is an invalid addressing mode. */
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				return -EINVAL;
		}
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		/*
		 * The fpc value lives left justified in the word: the
		 * low half must be zero and the high half must pass
		 * test_fp_ctl.
		 */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			if ((unsigned int) data != 0 ||
			    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
				return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);
	}

	return 0;
}
  350. static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
  351. {
  352. addr_t mask;
  353. /*
  354. * Stupid gdb peeks/pokes the access registers in 64 bit with
  355. * an alignment of 4. Programmers from hell indeed...
  356. */
  357. mask = __ADDR_MASK;
  358. #ifdef CONFIG_64BIT
  359. if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
  360. addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
  361. mask = 3;
  362. #endif
  363. if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
  364. return -EIO;
  365. return __poke_user(child, addr, data);
  366. }
  367. long arch_ptrace(struct task_struct *child, long request,
  368. unsigned long addr, unsigned long data)
  369. {
  370. ptrace_area parea;
  371. int copied, ret;
  372. switch (request) {
  373. case PTRACE_PEEKUSR:
  374. /* read the word at location addr in the USER area. */
  375. return peek_user(child, addr, data);
  376. case PTRACE_POKEUSR:
  377. /* write the word at location addr in the USER area */
  378. return poke_user(child, addr, data);
  379. case PTRACE_PEEKUSR_AREA:
  380. case PTRACE_POKEUSR_AREA:
  381. if (copy_from_user(&parea, (void __force __user *) addr,
  382. sizeof(parea)))
  383. return -EFAULT;
  384. addr = parea.kernel_addr;
  385. data = parea.process_addr;
  386. copied = 0;
  387. while (copied < parea.len) {
  388. if (request == PTRACE_PEEKUSR_AREA)
  389. ret = peek_user(child, addr, data);
  390. else {
  391. addr_t utmp;
  392. if (get_user(utmp,
  393. (addr_t __force __user *) data))
  394. return -EFAULT;
  395. ret = poke_user(child, addr, utmp);
  396. }
  397. if (ret)
  398. return ret;
  399. addr += sizeof(unsigned long);
  400. data += sizeof(unsigned long);
  401. copied += sizeof(unsigned long);
  402. }
  403. return 0;
  404. case PTRACE_GET_LAST_BREAK:
  405. put_user(task_thread_info(child)->last_break,
  406. (unsigned long __user *) data);
  407. return 0;
  408. case PTRACE_ENABLE_TE:
  409. if (!MACHINE_HAS_TE)
  410. return -EIO;
  411. child->thread.per_flags &= ~PER_FLAG_NO_TE;
  412. return 0;
  413. case PTRACE_DISABLE_TE:
  414. if (!MACHINE_HAS_TE)
  415. return -EIO;
  416. child->thread.per_flags |= PER_FLAG_NO_TE;
  417. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
  418. return 0;
  419. case PTRACE_TE_ABORT_RAND:
  420. if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
  421. return -EIO;
  422. switch (data) {
  423. case 0UL:
  424. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
  425. break;
  426. case 1UL:
  427. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
  428. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
  429. break;
  430. case 2UL:
  431. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
  432. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
  433. break;
  434. default:
  435. return -EINVAL;
  436. }
  437. return 0;
  438. default:
  439. /* Removing high order bit from addr (only for 31 bit). */
  440. addr &= PSW_ADDR_INSN;
  441. return ptrace_request(child, request, addr, data);
  442. }
  443. }
  444. #ifdef CONFIG_COMPAT
  445. /*
  446. * Now the fun part starts... a 31 bit program running in the
  447. * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
  448. * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
  449. * to handle, the difference to the 64 bit versions of the requests
  450. * is that the access is done in multiples of 4 byte instead of
  451. * 8 bytes (sizeof(unsigned long) on 31/64 bit).
  452. * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
  453. * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
  454. * is a 31 bit program too, the content of struct user can be
  455. * emulated. A 31 bit program peeking into the struct user of
  456. * a 64 bit program is a no-no.
  457. */
/*
 * Same as peek_user_per but for a 31 bit program.
 *
 * NOTE(review): the (__u32) casts bind to test_thread_flag() only,
 * not to the whole conditional expression; the result is truncated
 * to 32 bit by the return type anyway, so the casts are decorative
 * (the cr11 branch omits one without behavioral difference).
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}
/*
 * Same as peek_user but for a 31 bit program.
 *
 * @addr is the byte offset into struct compat_user (the 31 bit
 * layout); the NULL "dummy32" pointer only serves to compute field
 * offsets.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/*
			 * gpr 0-15: each 4 byte compat slot maps onto
			 * the second half of the 8 byte slot on the
			 * kernel stack, hence addr*2 + 4.
			 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);
	} else
		tmp = 0;

	return tmp;
}
  557. static int peek_user_compat(struct task_struct *child,
  558. addr_t addr, addr_t data)
  559. {
  560. __u32 tmp;
  561. if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
  562. return -EIO;
  563. tmp = __peek_user_compat(child, addr);
  564. return put_user(tmp, (__u32 __user *) data);
  565. }
/*
 * Same as poke_user_per but for a 31 bit program.
 *
 * Only cr9, starting_addr and ending_addr of the compat per layout
 * are writable; writes to any other offset are silently ignored.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Same as poke_user but for a 31 bit program.
 *
 * Returns 0 on success, -EINVAL for an invalid psw mask or fpc
 * value. Writes into the padding hole and unknown offsets succeed
 * silently without storing anything.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp & ~mask) != PSW32_USER_BITS)
				/* Invalid psw mask. */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/*
			 * gpr 0-15: store into the second half of the
			 * 8 byte slot on the kernel stack (addr*2 + 4).
			 */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    test_fp_ctl(tmp))
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
  653. static int poke_user_compat(struct task_struct *child,
  654. addr_t addr, addr_t data)
  655. {
  656. if (!is_compat_task() || (addr & 3) ||
  657. addr > sizeof(struct compat_user) - 3)
  658. return -EIO;
  659. return __poke_user_compat(child, addr, data);
  660. }
  661. long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
  662. compat_ulong_t caddr, compat_ulong_t cdata)
  663. {
  664. unsigned long addr = caddr;
  665. unsigned long data = cdata;
  666. compat_ptrace_area parea;
  667. int copied, ret;
  668. switch (request) {
  669. case PTRACE_PEEKUSR:
  670. /* read the word at location addr in the USER area. */
  671. return peek_user_compat(child, addr, data);
  672. case PTRACE_POKEUSR:
  673. /* write the word at location addr in the USER area */
  674. return poke_user_compat(child, addr, data);
  675. case PTRACE_PEEKUSR_AREA:
  676. case PTRACE_POKEUSR_AREA:
  677. if (copy_from_user(&parea, (void __force __user *) addr,
  678. sizeof(parea)))
  679. return -EFAULT;
  680. addr = parea.kernel_addr;
  681. data = parea.process_addr;
  682. copied = 0;
  683. while (copied < parea.len) {
  684. if (request == PTRACE_PEEKUSR_AREA)
  685. ret = peek_user_compat(child, addr, data);
  686. else {
  687. __u32 utmp;
  688. if (get_user(utmp,
  689. (__u32 __force __user *) data))
  690. return -EFAULT;
  691. ret = poke_user_compat(child, addr, utmp);
  692. }
  693. if (ret)
  694. return ret;
  695. addr += sizeof(unsigned int);
  696. data += sizeof(unsigned int);
  697. copied += sizeof(unsigned int);
  698. }
  699. return 0;
  700. case PTRACE_GET_LAST_BREAK:
  701. put_user(task_thread_info(child)->last_break,
  702. (unsigned int __user *) data);
  703. return 0;
  704. }
  705. return compat_ptrace_request(child, request, addr, data);
  706. }
  707. #endif
/*
 * Syscall entry hook, called from the sysc_tracesys path in entry.S.
 * Runs seccomp, the ptrace entry report, the syscall tracepoint and
 * audit, in that order. Returns the system call number to execute,
 * or -1 when the syscall is to be skipped (seccomp denial, tracer
 * abort, or an invalid number stored by the debugger).
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Do the secure computing check first. */
	if (secure_computing(regs->gprs[2])) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1;
		goto out;
	}

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_thread_flag(TIF_SYSCALL);
		ret = -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	audit_syscall_entry(is_compat_task() ?
				AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
			    regs->gprs[2], regs->orig_gpr2,
			    regs->gprs[3], regs->gprs[4],
			    regs->gprs[5]);
out:
	/* -1 skips the syscall, otherwise hand back the syscall number. */
	return ret ?: regs->gprs[2];
}
/*
 * Syscall exit hook: report the completed syscall to audit, the
 * syscall exit tracepoint and the ptrace exit report, in that order.
 */
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
  750. /*
  751. * user_regset definitions.
  752. */
  753. static int s390_regs_get(struct task_struct *target,
  754. const struct user_regset *regset,
  755. unsigned int pos, unsigned int count,
  756. void *kbuf, void __user *ubuf)
  757. {
  758. if (target == current)
  759. save_access_regs(target->thread.acrs);
  760. if (kbuf) {
  761. unsigned long *k = kbuf;
  762. while (count > 0) {
  763. *k++ = __peek_user(target, pos);
  764. count -= sizeof(*k);
  765. pos += sizeof(*k);
  766. }
  767. } else {
  768. unsigned long __user *u = ubuf;
  769. while (count > 0) {
  770. if (__put_user(__peek_user(target, pos), u++))
  771. return -EFAULT;
  772. count -= sizeof(*u);
  773. pos += sizeof(*u);
  774. }
  775. }
  776. return 0;
  777. }
  778. static int s390_regs_set(struct task_struct *target,
  779. const struct user_regset *regset,
  780. unsigned int pos, unsigned int count,
  781. const void *kbuf, const void __user *ubuf)
  782. {
  783. int rc = 0;
  784. if (target == current)
  785. save_access_regs(target->thread.acrs);
  786. if (kbuf) {
  787. const unsigned long *k = kbuf;
  788. while (count > 0 && !rc) {
  789. rc = __poke_user(target, pos, *k++);
  790. count -= sizeof(*k);
  791. pos += sizeof(*k);
  792. }
  793. } else {
  794. const unsigned long __user *u = ubuf;
  795. while (count > 0 && !rc) {
  796. unsigned long word;
  797. rc = __get_user(word, u++);
  798. if (rc)
  799. break;
  800. rc = __poke_user(target, pos, word);
  801. count -= sizeof(*u);
  802. pos += sizeof(*u);
  803. }
  804. }
  805. if (rc == 0 && target == current)
  806. restore_access_regs(target->thread.acrs);
  807. return rc;
  808. }
  809. static int s390_fpregs_get(struct task_struct *target,
  810. const struct user_regset *regset, unsigned int pos,
  811. unsigned int count, void *kbuf, void __user *ubuf)
  812. {
  813. if (target == current) {
  814. save_fp_ctl(&target->thread.fp_regs.fpc);
  815. save_fp_regs(target->thread.fp_regs.fprs);
  816. }
  817. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  818. &target->thread.fp_regs, 0, -1);
  819. }
/*
 * Regset handler: write the floating point registers (fpc + fprs) of
 * @target.  If the write covers the floating point control word it is
 * copied in and validated separately, before any fprs are touched; an
 * fpc rejected by test_fp_ctl() or a nonzero second word fails the whole
 * request with -EINVAL.
 */
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	/* Snapshot the live FP state so a partial write merges into it. */
	if (target == current) {
		save_fp_ctl(&target->thread.fp_regs.fpc);
		save_fp_regs(target->thread.fp_regs.fprs);
	}
	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		/* ufpc[0] = fpc, ufpc[1] must remain zero after copyin. */
		u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fp_regs.fpc = ufpc[0];
	}
	/* Copy in the fprs for whatever part of the regset remains. */
	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);
	/* Load the validated state back into the hardware registers. */
	if (rc == 0 && target == current) {
		restore_fp_ctl(&target->thread.fp_regs.fpc);
		restore_fp_regs(target->thread.fp_regs.fprs);
	}
	return rc;
}
  851. #ifdef CONFIG_64BIT
  852. static int s390_last_break_get(struct task_struct *target,
  853. const struct user_regset *regset,
  854. unsigned int pos, unsigned int count,
  855. void *kbuf, void __user *ubuf)
  856. {
  857. if (count > 0) {
  858. if (kbuf) {
  859. unsigned long *k = kbuf;
  860. *k = task_thread_info(target)->last_break;
  861. } else {
  862. unsigned long __user *u = ubuf;
  863. if (__put_user(task_thread_info(target)->last_break, u))
  864. return -EFAULT;
  865. }
  866. }
  867. return 0;
  868. }
/*
 * The last breaking-event address is state maintained by the kernel;
 * writes are accepted but discarded so that setting the complete regset
 * list does not fail on this read-only note.
 */
static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}
/*
 * Regset handler: read the 256-byte transaction diagnostic block (TDB)
 * saved in the thread structure.
 *
 * NOTE(review): the 0x200 bit in int_code presumably flags that a TDB
 * was stored for the last program interrupt -- confirm against the s390
 * low-level interrupt handler.  Without it, -ENODATA is returned.
 */
static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned char *data;

	if (!(regs->int_code & 0x200))
		return -ENODATA;
	data = target->thread.trap_tdb;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}
/*
 * The transaction diagnostic block is hardware-written state; writes are
 * accepted but discarded so that setting the full regset list succeeds.
 */
static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}
  895. #endif
  896. static int s390_system_call_get(struct task_struct *target,
  897. const struct user_regset *regset,
  898. unsigned int pos, unsigned int count,
  899. void *kbuf, void __user *ubuf)
  900. {
  901. unsigned int *data = &task_thread_info(target)->system_call;
  902. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  903. data, 0, sizeof(unsigned int));
  904. }
  905. static int s390_system_call_set(struct task_struct *target,
  906. const struct user_regset *regset,
  907. unsigned int pos, unsigned int count,
  908. const void *kbuf, const void __user *ubuf)
  909. {
  910. unsigned int *data = &task_thread_info(target)->system_call;
  911. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  912. data, 0, sizeof(unsigned int));
  913. }
/*
 * Native regsets exported through PTRACE_GETREGSET/PTRACE_SETREGSET and
 * used to lay out the ELF core dump notes for this architecture.
 */
static const struct user_regset s390_regsets[] = {
	/* General purpose registers, psw and access registers. */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	/* Floating point control word and registers. */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
#ifdef CONFIG_64BIT
	/* Last breaking-event address (single word, read-only). */
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	/* Transaction diagnostic block (one 256-byte blob, read-only). */
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
#endif
	/* Interrupted system call number for restart handling. */
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
};
/* Native regset view advertised via task_user_regset_view(). */
static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
  964. #ifdef CONFIG_COMPAT
  965. static int s390_compat_regs_get(struct task_struct *target,
  966. const struct user_regset *regset,
  967. unsigned int pos, unsigned int count,
  968. void *kbuf, void __user *ubuf)
  969. {
  970. if (target == current)
  971. save_access_regs(target->thread.acrs);
  972. if (kbuf) {
  973. compat_ulong_t *k = kbuf;
  974. while (count > 0) {
  975. *k++ = __peek_user_compat(target, pos);
  976. count -= sizeof(*k);
  977. pos += sizeof(*k);
  978. }
  979. } else {
  980. compat_ulong_t __user *u = ubuf;
  981. while (count > 0) {
  982. if (__put_user(__peek_user_compat(target, pos), u++))
  983. return -EFAULT;
  984. count -= sizeof(*u);
  985. pos += sizeof(*u);
  986. }
  987. }
  988. return 0;
  989. }
  990. static int s390_compat_regs_set(struct task_struct *target,
  991. const struct user_regset *regset,
  992. unsigned int pos, unsigned int count,
  993. const void *kbuf, const void __user *ubuf)
  994. {
  995. int rc = 0;
  996. if (target == current)
  997. save_access_regs(target->thread.acrs);
  998. if (kbuf) {
  999. const compat_ulong_t *k = kbuf;
  1000. while (count > 0 && !rc) {
  1001. rc = __poke_user_compat(target, pos, *k++);
  1002. count -= sizeof(*k);
  1003. pos += sizeof(*k);
  1004. }
  1005. } else {
  1006. const compat_ulong_t __user *u = ubuf;
  1007. while (count > 0 && !rc) {
  1008. compat_ulong_t word;
  1009. rc = __get_user(word, u++);
  1010. if (rc)
  1011. break;
  1012. rc = __poke_user_compat(target, pos, word);
  1013. count -= sizeof(*u);
  1014. pos += sizeof(*u);
  1015. }
  1016. }
  1017. if (rc == 0 && target == current)
  1018. restore_access_regs(target->thread.acrs);
  1019. return rc;
  1020. }
  1021. static int s390_compat_regs_high_get(struct task_struct *target,
  1022. const struct user_regset *regset,
  1023. unsigned int pos, unsigned int count,
  1024. void *kbuf, void __user *ubuf)
  1025. {
  1026. compat_ulong_t *gprs_high;
  1027. gprs_high = (compat_ulong_t *)
  1028. &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
  1029. if (kbuf) {
  1030. compat_ulong_t *k = kbuf;
  1031. while (count > 0) {
  1032. *k++ = *gprs_high;
  1033. gprs_high += 2;
  1034. count -= sizeof(*k);
  1035. }
  1036. } else {
  1037. compat_ulong_t __user *u = ubuf;
  1038. while (count > 0) {
  1039. if (__put_user(*gprs_high, u++))
  1040. return -EFAULT;
  1041. gprs_high += 2;
  1042. count -= sizeof(*u);
  1043. }
  1044. }
  1045. return 0;
  1046. }
  1047. static int s390_compat_regs_high_set(struct task_struct *target,
  1048. const struct user_regset *regset,
  1049. unsigned int pos, unsigned int count,
  1050. const void *kbuf, const void __user *ubuf)
  1051. {
  1052. compat_ulong_t *gprs_high;
  1053. int rc = 0;
  1054. gprs_high = (compat_ulong_t *)
  1055. &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
  1056. if (kbuf) {
  1057. const compat_ulong_t *k = kbuf;
  1058. while (count > 0) {
  1059. *gprs_high = *k++;
  1060. *gprs_high += 2;
  1061. count -= sizeof(*k);
  1062. }
  1063. } else {
  1064. const compat_ulong_t __user *u = ubuf;
  1065. while (count > 0 && !rc) {
  1066. unsigned long word;
  1067. rc = __get_user(word, u++);
  1068. if (rc)
  1069. break;
  1070. *gprs_high = word;
  1071. *gprs_high += 2;
  1072. count -= sizeof(*u);
  1073. }
  1074. }
  1075. return rc;
  1076. }
/*
 * Compat regset handler: read the last breaking-event address, truncated
 * to a 31-bit compat_ulong_t.
 *
 * NOTE(review): the buffer is written through an unsigned long pointer,
 * matching the sizeof(long) entry size declared for REGSET_LAST_BREAK in
 * s390_compat_regsets -- confirm the intended note layout before
 * changing either side.
 */
static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      void *kbuf, void __user *ubuf)
{
	compat_ulong_t last_break;

	if (count > 0) {
		last_break = task_thread_info(target)->last_break;
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}
/*
 * The last breaking-event address is kernel-maintained; compat writes
 * are accepted but discarded so full-regset-list writes do not fail.
 */
static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}
/*
 * Regsets exposed to 31-bit (compat) tracees and used for their ELF
 * core dump notes.
 */
static const struct user_regset s390_compat_regsets[] = {
	/* 31-bit general purpose registers, psw and access registers. */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	/* FP layout is identical in both ABIs; reuse native handlers. */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	/* Last breaking-event address (read-only). */
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	/* Transaction diagnostic block (read-only). */
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	/* Interrupted system call number for restart handling. */
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	/* Upper halves of the 64-bit gprs, invisible to the 31-bit ABI. */
	[REGSET_GENERAL_EXTENDED] = {
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};
/* Regset view handed out for 31-bit (TIF_31BIT) tracees. */
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
  1159. #endif
/*
 * Select the regset view matching the tracee's ABI: the compat view for
 * 31-bit tasks when CONFIG_COMPAT is enabled, the native view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
/* Symbolic names of the general purpose registers, indexed by number. */
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
  1172. unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
  1173. {
  1174. if (offset >= NUM_GPRS)
  1175. return 0;
  1176. return regs->gprs[offset];
  1177. }
  1178. int regs_query_register_offset(const char *name)
  1179. {
  1180. unsigned long offset;
  1181. if (!name || *name != 'r')
  1182. return -EINVAL;
  1183. if (kstrtoul(name + 1, 10, &offset))
  1184. return -EINVAL;
  1185. if (offset >= NUM_GPRS)
  1186. return -EINVAL;
  1187. return offset;
  1188. }
  1189. const char *regs_query_register_name(unsigned int offset)
  1190. {
  1191. if (offset >= NUM_GPRS)
  1192. return NULL;
  1193. return gpr_names[offset];
  1194. }
  1195. static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
  1196. {
  1197. unsigned long ksp = kernel_stack_pointer(regs);
  1198. return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
  1199. }
  1200. /**
  1201. * regs_get_kernel_stack_nth() - get Nth entry of the stack
  1202. * @regs:pt_regs which contains kernel stack pointer.
  1203. * @n:stack entry number.
  1204. *
  1205. * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
  1206. * is specifined by @regs. If the @n th entry is NOT in the kernel stack,
  1207. * this returns 0.
  1208. */
  1209. unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
  1210. {
  1211. unsigned long addr;
  1212. addr = kernel_stack_pointer(regs) + n * sizeof(long);
  1213. if (!regs_within_kernel_stack(regs, addr))
  1214. return 0;
  1215. return *(unsigned long *)addr;
  1216. }