ptrace.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312
  1. /*
  2. * Ptrace user space interface.
  3. *
  4. * Copyright IBM Corp. 1999, 2010
  5. * Author(s): Denis Joseph Barrow
  6. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/sched.h>
  10. #include <linux/mm.h>
  11. #include <linux/smp.h>
  12. #include <linux/errno.h>
  13. #include <linux/ptrace.h>
  14. #include <linux/user.h>
  15. #include <linux/security.h>
  16. #include <linux/audit.h>
  17. #include <linux/signal.h>
  18. #include <linux/elf.h>
  19. #include <linux/regset.h>
  20. #include <linux/tracehook.h>
  21. #include <linux/seccomp.h>
  22. #include <linux/compat.h>
  23. #include <trace/syscall.h>
  24. #include <asm/segment.h>
  25. #include <asm/page.h>
  26. #include <asm/pgtable.h>
  27. #include <asm/pgalloc.h>
  28. #include <asm/uaccess.h>
  29. #include <asm/unistd.h>
  30. #include <asm/switch_to.h>
  31. #include "entry.h"
  32. #ifdef CONFIG_COMPAT
  33. #include "compat_ptrace.h"
  34. #endif
  35. #define CREATE_TRACE_POINTS
  36. #include <trace/events/syscalls.h>
/* Index of each user_regset exported by this architecture in s390_regsets[]. */
enum s390_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_LAST_BREAK,
	REGSET_TDB,
	REGSET_SYSTEM_CALL,
	REGSET_GENERAL_EXTENDED,
};
/*
 * update_per_regs - load the PER (program event recording) state of
 * @task into the hardware: control registers 9-11 and the PER bit in
 * the PSW. The user specified PER set is merged with the kernel's
 * single-step emulation (TIF_SINGLE_STEP).
 *
 * NOTE(review): this writes control registers of the executing CPU,
 * so it presumably must run on the CPU @task is running on; the
 * callers in this file only invoke it with task == current.
 */
void update_per_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;

#ifdef CONFIG_64BIT
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		unsigned long cr0, cr0_new;
		__ctl_store(cr0, 0, 0);
		/*
		 * set or clear transaction execution bits 8 and 9
		 * (i.e. bits 55 and 54 counted from the LSB of CR0).
		 */
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new = cr0 & ~(3UL << 54);
		else
			cr0_new = cr0 | (3UL << 54);
		/* Only load control register 0 if necessary. */
		if (cr0 != cr0_new)
			__ctl_load(cr0_new, 0, 0);
	}
#endif
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;
	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
		/* Trap on every instruction fetch in the whole address space. */
		new.control |= PER_EVENT_IFETCH;
#ifdef CONFIG_64BIT
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
#endif
		new.start = 0;
		new.end = PSW_ADDR_INSN;
	}
	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		/* No events requested: disable PER and skip the CR load. */
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	/* Only reload CR9-CR11 if the PER set actually changed. */
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
  89. void user_enable_single_step(struct task_struct *task)
  90. {
  91. set_tsk_thread_flag(task, TIF_SINGLE_STEP);
  92. if (task == current)
  93. update_per_regs(task);
  94. }
  95. void user_disable_single_step(struct task_struct *task)
  96. {
  97. clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
  98. if (task == current)
  99. update_per_regs(task);
  100. }
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields: the user specified PER set,
 * the recorded last PER event, the single-step and pending PER trap
 * flags, and the per_flags (e.g. PER_FLAG_NO_TE).
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_tsk_thread_flag(task, TIF_PER_TRAP);
	task->thread.per_flags = 0;
}
/*
 * Low address bits that must be clear for an aligned USER-area access:
 * words are 4 bytes on 31 bit and 8 bytes on 64 bit.
 */
#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif
/*
 * __peek_user_per - read one word at offset @addr into the (virtual)
 * per_struct_kernel of @child. The NULL dummy pointer is used only to
 * compute member offsets. While single stepping is in effect the
 * "active" per set reported is the synthetic instruction-fetch set,
 * not what the debugger wrote. Unknown offsets read as zero.
 * Caller (__peek_user) has already validated alignment and range.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 *
 * Caller (peek_user / the regset code) guarantees @addr is aligned
 * and within sizeof(struct user).
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask)
			/* Return a clean psw mask. */
			tmp = psw_user_bits | (tmp & PSW_MASK_USER);
	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		/* Only hand out the architecturally valid FPC bits. */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp &= (unsigned long) FPC_VALID_MASK
				<< (BITS_PER_LONG - 32);
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);
	} else
		/* Trailing pad in struct user reads as zero. */
		tmp = 0;

	return tmp;
}
/*
 * peek_user - validate alignment and range of @addr, read one word
 * of the USER area of @child and store it at user address @data.
 * Returns -EIO for bad offsets, -EFAULT if the store faults.
 */
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	/* The 32 bit access registers allow 4 byte alignment. */
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;
	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}
/*
 * __poke_user_per - write one word at offset @addr into the (virtual)
 * per_struct_kernel of @child. Caller has validated alignment/range.
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register needs to get checked for validity.
 *
 * Returns 0 on success (including silently ignored pad writes),
 * -EINVAL for an invalid psw mask or fpc value.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask &&
		    ((data & ~PSW_MASK_USER) != psw_user_bits ||
		     ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
			/* Invalid psw mask. */
			return -EINVAL;
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
		    (data & ~((unsigned long) FPC_VALID_MASK
			      << (BITS_PER_LONG - 32))) != 0)
			return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);
	}

	return 0;
}
/*
 * poke_user - validate alignment and range of @addr, then write one
 * word into the USER area of @child. Returns -EIO for bad offsets.
 */
static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	/* The 32 bit access registers allow 4 byte alignment. */
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;
	return __poke_user(child, addr, data);
}
  355. long arch_ptrace(struct task_struct *child, long request,
  356. unsigned long addr, unsigned long data)
  357. {
  358. ptrace_area parea;
  359. int copied, ret;
  360. switch (request) {
  361. case PTRACE_PEEKUSR:
  362. /* read the word at location addr in the USER area. */
  363. return peek_user(child, addr, data);
  364. case PTRACE_POKEUSR:
  365. /* write the word at location addr in the USER area */
  366. return poke_user(child, addr, data);
  367. case PTRACE_PEEKUSR_AREA:
  368. case PTRACE_POKEUSR_AREA:
  369. if (copy_from_user(&parea, (void __force __user *) addr,
  370. sizeof(parea)))
  371. return -EFAULT;
  372. addr = parea.kernel_addr;
  373. data = parea.process_addr;
  374. copied = 0;
  375. while (copied < parea.len) {
  376. if (request == PTRACE_PEEKUSR_AREA)
  377. ret = peek_user(child, addr, data);
  378. else {
  379. addr_t utmp;
  380. if (get_user(utmp,
  381. (addr_t __force __user *) data))
  382. return -EFAULT;
  383. ret = poke_user(child, addr, utmp);
  384. }
  385. if (ret)
  386. return ret;
  387. addr += sizeof(unsigned long);
  388. data += sizeof(unsigned long);
  389. copied += sizeof(unsigned long);
  390. }
  391. return 0;
  392. case PTRACE_GET_LAST_BREAK:
  393. put_user(task_thread_info(child)->last_break,
  394. (unsigned long __user *) data);
  395. return 0;
  396. case PTRACE_ENABLE_TE:
  397. if (!MACHINE_HAS_TE)
  398. return -EIO;
  399. child->thread.per_flags &= ~PER_FLAG_NO_TE;
  400. return 0;
  401. case PTRACE_DISABLE_TE:
  402. if (!MACHINE_HAS_TE)
  403. return -EIO;
  404. child->thread.per_flags |= PER_FLAG_NO_TE;
  405. return 0;
  406. default:
  407. /* Removing high order bit from addr (only for 31 bit). */
  408. addr &= PSW_ADDR_INSN;
  409. return ptrace_request(child, request, addr, data);
  410. }
  411. }
#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 byte instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */

/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/*
		 * End address of the active per set.
		 * NOTE(review): unlike the sibling branches there is no
		 * (__u32) cast on test_thread_flag() here; harmless since
		 * the cast binds to the flag, not the conditional, but
		 * inconsistent — confirm against upstream.
		 */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}
/*
 * Same as peek_user but for a 31 bit program.
 * Translates offsets into the 31 bit struct compat_user to the
 * corresponding 64 bit state, faking a 31 bit psw where needed.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp = psw32_user_bits | (tmp & PSW32_MASK_USER);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/*
			 * gpr 0-15: each 31 bit word maps onto the low
			 * half of the corresponding 64 bit gpr, hence
			 * the addr*2 + 4 scaling.
			 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);
	} else
		/* Trailing pad reads as zero. */
		tmp = 0;

	return tmp;
}
  524. static int peek_user_compat(struct task_struct *child,
  525. addr_t addr, addr_t data)
  526. {
  527. __u32 tmp;
  528. if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
  529. return -EIO;
  530. tmp = __peek_user_compat(child, addr);
  531. return put_user(tmp, (__u32 __user *) data);
  532. }
/*
 * Same as poke_user_per but for a 31 bit program.
 * Only cr9, starting_addr and ending_addr are writable; all other
 * offsets are silently ignored (caller validated alignment/range).
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Same as poke_user but for a 31 bit program.
 * Translates a 31 bit USER-area write into the 64 bit state,
 * validating psw mask and fpc values. Returns 0 or -EINVAL.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
				/* Invalid psw mask. */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & PSW32_MASK_USER) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/* gpr 0-15: write the low half of the 64 bit gpr. */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    (tmp & ~FPC_VALID_MASK) != 0)
			/* Invalid floating point control. */
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
  619. static int poke_user_compat(struct task_struct *child,
  620. addr_t addr, addr_t data)
  621. {
  622. if (!is_compat_task() || (addr & 3) ||
  623. addr > sizeof(struct compat_user) - 3)
  624. return -EIO;
  625. return __poke_user_compat(child, addr, data);
  626. }
  627. long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
  628. compat_ulong_t caddr, compat_ulong_t cdata)
  629. {
  630. unsigned long addr = caddr;
  631. unsigned long data = cdata;
  632. compat_ptrace_area parea;
  633. int copied, ret;
  634. switch (request) {
  635. case PTRACE_PEEKUSR:
  636. /* read the word at location addr in the USER area. */
  637. return peek_user_compat(child, addr, data);
  638. case PTRACE_POKEUSR:
  639. /* write the word at location addr in the USER area */
  640. return poke_user_compat(child, addr, data);
  641. case PTRACE_PEEKUSR_AREA:
  642. case PTRACE_POKEUSR_AREA:
  643. if (copy_from_user(&parea, (void __force __user *) addr,
  644. sizeof(parea)))
  645. return -EFAULT;
  646. addr = parea.kernel_addr;
  647. data = parea.process_addr;
  648. copied = 0;
  649. while (copied < parea.len) {
  650. if (request == PTRACE_PEEKUSR_AREA)
  651. ret = peek_user_compat(child, addr, data);
  652. else {
  653. __u32 utmp;
  654. if (get_user(utmp,
  655. (__u32 __force __user *) data))
  656. return -EFAULT;
  657. ret = poke_user_compat(child, addr, utmp);
  658. }
  659. if (ret)
  660. return ret;
  661. addr += sizeof(unsigned int);
  662. data += sizeof(unsigned int);
  663. copied += sizeof(unsigned int);
  664. }
  665. return 0;
  666. case PTRACE_GET_LAST_BREAK:
  667. put_user(task_thread_info(child)->last_break,
  668. (unsigned int __user *) data);
  669. return 0;
  670. }
  671. return compat_ptrace_request(child, request, addr, data);
  672. }
  673. #endif
/*
 * do_syscall_trace_enter - syscall entry work: seccomp, ptrace
 * syscall-entry stop, syscall tracepoint and audit. Returns the
 * (possibly rewritten) syscall number from gprs[2], or -1 to make
 * the caller skip the system call.
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Do the secure computing check first. */
	if (secure_computing(regs->gprs[2])) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1;
		goto out;
	}

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_thread_flag(TIF_SYSCALL);
		ret = -1;
	}
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);
	/* Audit runs even when the syscall itself will be skipped. */
	audit_syscall_entry(is_compat_task() ?
				AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
			    regs->gprs[2], regs->orig_gpr2,
			    regs->gprs[3], regs->gprs[4],
			    regs->gprs[5]);
out:
	/* -1 wins over the syscall number; otherwise return gprs[2]. */
	return ret ?: regs->gprs[2];
}
/*
 * do_syscall_trace_exit - syscall exit work: audit first, then the
 * syscall-exit tracepoint, then the ptrace syscall-exit stop.
 */
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
  716. /*
  717. * user_regset definitions.
  718. */
  719. static int s390_regs_get(struct task_struct *target,
  720. const struct user_regset *regset,
  721. unsigned int pos, unsigned int count,
  722. void *kbuf, void __user *ubuf)
  723. {
  724. if (target == current)
  725. save_access_regs(target->thread.acrs);
  726. if (kbuf) {
  727. unsigned long *k = kbuf;
  728. while (count > 0) {
  729. *k++ = __peek_user(target, pos);
  730. count -= sizeof(*k);
  731. pos += sizeof(*k);
  732. }
  733. } else {
  734. unsigned long __user *u = ubuf;
  735. while (count > 0) {
  736. if (__put_user(__peek_user(target, pos), u++))
  737. return -EFAULT;
  738. count -= sizeof(*u);
  739. pos += sizeof(*u);
  740. }
  741. }
  742. return 0;
  743. }
/*
 * s390_regs_set - regset write-in of the general registers, built on
 * word-wise __poke_user() stores. Reads from the kernel buffer @kbuf
 * or the user buffer @ubuf; stops at the first error. On success the
 * (possibly changed) access registers are reloaded into the hardware
 * when the target is the current task.
 */
static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	/* Snapshot the live access registers before overwriting them. */
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
/*
 * Copy the floating point register set (fpc + 16 fprs) of @target to the
 * caller-supplied buffer.  For the current task the live FP state is saved
 * into thread.fp_regs first so the copy is up to date.
 */
static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_fp_regs(&target->thread.fp_regs);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}
/*
 * Set the floating point register set of @target from the caller-supplied
 * buffer.  The fp control word (fpc) is validated against FPC_VALID_MASK
 * before it is stored; the remaining bytes go straight into the fprs.
 */
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	if (target == current)
		save_fp_regs(&target->thread.fp_regs);
	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		/*
		 * Stage the (possibly partial) fpc write into a local copy
		 * seeded with the current value, so a short write only
		 * replaces the bytes the caller actually supplied.  fpc[1]
		 * covers the pad word and must remain zero.
		 */
		u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
			return -EINVAL;
		target->thread.fp_regs.fpc = fpc[0];
	}
	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);
	/* Reload the new FP state immediately when modifying ourselves. */
	if (rc == 0 && target == current)
		restore_fp_regs(&target->thread.fp_regs);
	return rc;
}
  811. #ifdef CONFIG_64BIT
  812. static int s390_last_break_get(struct task_struct *target,
  813. const struct user_regset *regset,
  814. unsigned int pos, unsigned int count,
  815. void *kbuf, void __user *ubuf)
  816. {
  817. if (count > 0) {
  818. if (kbuf) {
  819. unsigned long *k = kbuf;
  820. *k = task_thread_info(target)->last_break;
  821. } else {
  822. unsigned long __user *u = ubuf;
  823. if (__put_user(task_thread_info(target)->last_break, u))
  824. return -EFAULT;
  825. }
  826. }
  827. return 0;
  828. }
/*
 * The last-break regset is read-only; writes are silently accepted so that
 * generic regset users (e.g. core dump restore paths) do not fail.
 */
static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}
  836. static int s390_tdb_get(struct task_struct *target,
  837. const struct user_regset *regset,
  838. unsigned int pos, unsigned int count,
  839. void *kbuf, void __user *ubuf)
  840. {
  841. struct pt_regs *regs = task_pt_regs(target);
  842. unsigned char *data;
  843. if (!(regs->int_code & 0x200))
  844. return -ENODATA;
  845. data = target->thread.trap_tdb;
  846. return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
  847. }
/*
 * The TDB regset is read-only; writes are silently accepted for the
 * benefit of generic regset users.
 */
static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}
  855. #endif
  856. static int s390_system_call_get(struct task_struct *target,
  857. const struct user_regset *regset,
  858. unsigned int pos, unsigned int count,
  859. void *kbuf, void __user *ubuf)
  860. {
  861. unsigned int *data = &task_thread_info(target)->system_call;
  862. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  863. data, 0, sizeof(unsigned int));
  864. }
  865. static int s390_system_call_set(struct task_struct *target,
  866. const struct user_regset *regset,
  867. unsigned int pos, unsigned int count,
  868. const void *kbuf, const void __user *ubuf)
  869. {
  870. unsigned int *data = &task_thread_info(target)->system_call;
  871. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  872. data, 0, sizeof(unsigned int));
  873. }
/* Native (64-bit, or 31-bit kernel) regsets exported via PTRACE_GETREGSET
 * and written into ELF core dump notes. */
static const struct user_regset s390_regsets[] = {
	/* General registers: psw, gprs, acrs, orig_gpr2. */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	/* Floating point registers: fpc + 16 fprs. */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
#ifdef CONFIG_64BIT
	/* Address of the last breaking event (read-only). */
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	/* Transaction diagnostic block (read-only, 256 bytes). */
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
#endif
	/* Saved system call number for restart handling. */
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
};
/* Regset view used for native tasks (see task_user_regset_view()). */
static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
  924. #ifdef CONFIG_COMPAT
  925. static int s390_compat_regs_get(struct task_struct *target,
  926. const struct user_regset *regset,
  927. unsigned int pos, unsigned int count,
  928. void *kbuf, void __user *ubuf)
  929. {
  930. if (target == current)
  931. save_access_regs(target->thread.acrs);
  932. if (kbuf) {
  933. compat_ulong_t *k = kbuf;
  934. while (count > 0) {
  935. *k++ = __peek_user_compat(target, pos);
  936. count -= sizeof(*k);
  937. pos += sizeof(*k);
  938. }
  939. } else {
  940. compat_ulong_t __user *u = ubuf;
  941. while (count > 0) {
  942. if (__put_user(__peek_user_compat(target, pos), u++))
  943. return -EFAULT;
  944. count -= sizeof(*u);
  945. pos += sizeof(*u);
  946. }
  947. }
  948. return 0;
  949. }
  950. static int s390_compat_regs_set(struct task_struct *target,
  951. const struct user_regset *regset,
  952. unsigned int pos, unsigned int count,
  953. const void *kbuf, const void __user *ubuf)
  954. {
  955. int rc = 0;
  956. if (target == current)
  957. save_access_regs(target->thread.acrs);
  958. if (kbuf) {
  959. const compat_ulong_t *k = kbuf;
  960. while (count > 0 && !rc) {
  961. rc = __poke_user_compat(target, pos, *k++);
  962. count -= sizeof(*k);
  963. pos += sizeof(*k);
  964. }
  965. } else {
  966. const compat_ulong_t __user *u = ubuf;
  967. while (count > 0 && !rc) {
  968. compat_ulong_t word;
  969. rc = __get_user(word, u++);
  970. if (rc)
  971. break;
  972. rc = __poke_user_compat(target, pos, word);
  973. count -= sizeof(*u);
  974. pos += sizeof(*u);
  975. }
  976. }
  977. if (rc == 0 && target == current)
  978. restore_access_regs(target->thread.acrs);
  979. return rc;
  980. }
  981. static int s390_compat_regs_high_get(struct task_struct *target,
  982. const struct user_regset *regset,
  983. unsigned int pos, unsigned int count,
  984. void *kbuf, void __user *ubuf)
  985. {
  986. compat_ulong_t *gprs_high;
  987. gprs_high = (compat_ulong_t *)
  988. &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
  989. if (kbuf) {
  990. compat_ulong_t *k = kbuf;
  991. while (count > 0) {
  992. *k++ = *gprs_high;
  993. gprs_high += 2;
  994. count -= sizeof(*k);
  995. }
  996. } else {
  997. compat_ulong_t __user *u = ubuf;
  998. while (count > 0) {
  999. if (__put_user(*gprs_high, u++))
  1000. return -EFAULT;
  1001. gprs_high += 2;
  1002. count -= sizeof(*u);
  1003. }
  1004. }
  1005. return 0;
  1006. }
  1007. static int s390_compat_regs_high_set(struct task_struct *target,
  1008. const struct user_regset *regset,
  1009. unsigned int pos, unsigned int count,
  1010. const void *kbuf, const void __user *ubuf)
  1011. {
  1012. compat_ulong_t *gprs_high;
  1013. int rc = 0;
  1014. gprs_high = (compat_ulong_t *)
  1015. &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
  1016. if (kbuf) {
  1017. const compat_ulong_t *k = kbuf;
  1018. while (count > 0) {
  1019. *gprs_high = *k++;
  1020. *gprs_high += 2;
  1021. count -= sizeof(*k);
  1022. }
  1023. } else {
  1024. const compat_ulong_t __user *u = ubuf;
  1025. while (count > 0 && !rc) {
  1026. unsigned long word;
  1027. rc = __get_user(word, u++);
  1028. if (rc)
  1029. break;
  1030. *gprs_high = word;
  1031. *gprs_high += 2;
  1032. count -= sizeof(*u);
  1033. }
  1034. }
  1035. return rc;
  1036. }
  1037. static int s390_compat_last_break_get(struct task_struct *target,
  1038. const struct user_regset *regset,
  1039. unsigned int pos, unsigned int count,
  1040. void *kbuf, void __user *ubuf)
  1041. {
  1042. compat_ulong_t last_break;
  1043. if (count > 0) {
  1044. last_break = task_thread_info(target)->last_break;
  1045. if (kbuf) {
  1046. unsigned long *k = kbuf;
  1047. *k = last_break;
  1048. } else {
  1049. unsigned long __user *u = ubuf;
  1050. if (__put_user(last_break, u))
  1051. return -EFAULT;
  1052. }
  1053. }
  1054. return 0;
  1055. }
/*
 * The compat last-break regset is read-only; writes are silently accepted
 * so that generic regset users do not fail.
 */
static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}
/* Regsets exported to 31-bit (compat) tasks. */
static const struct user_regset s390_compat_regsets[] = {
	/* 31-bit view of the general registers. */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	/* FP registers have the same layout for 31- and 64-bit tasks. */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	/* Last breaking event address (read-only). */
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	/* Transaction diagnostic block (read-only, 256 bytes). */
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	/* Saved system call number for restart handling. */
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	/* Upper halves of the 64-bit gprs, invisible in the 31-bit view. */
	[REGSET_GENERAL_EXTENDED] = {
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};
/* Regset view used for 31-bit (compat) tasks. */
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
  1119. #endif
/*
 * Select the regset view matching @task's execution mode: the compat view
 * for 31-bit tasks (TIF_31BIT), the native view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
/* Register names indexed by gpr number, for regs_query_register_name(). */
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
  1132. unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
  1133. {
  1134. if (offset >= NUM_GPRS)
  1135. return 0;
  1136. return regs->gprs[offset];
  1137. }
  1138. int regs_query_register_offset(const char *name)
  1139. {
  1140. unsigned long offset;
  1141. if (!name || *name != 'r')
  1142. return -EINVAL;
  1143. if (strict_strtoul(name + 1, 10, &offset))
  1144. return -EINVAL;
  1145. if (offset >= NUM_GPRS)
  1146. return -EINVAL;
  1147. return offset;
  1148. }
  1149. const char *regs_query_register_name(unsigned int offset)
  1150. {
  1151. if (offset >= NUM_GPRS)
  1152. return NULL;
  1153. return gpr_names[offset];
  1154. }
  1155. static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
  1156. {
  1157. unsigned long ksp = kernel_stack_pointer(regs);
  1158. return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
  1159. }
  1160. /**
  1161. * regs_get_kernel_stack_nth() - get Nth entry of the stack
  1162. * @regs:pt_regs which contains kernel stack pointer.
  1163. * @n:stack entry number.
  1164. *
  1165. * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
  1166. * is specifined by @regs. If the @n th entry is NOT in the kernel stack,
  1167. * this returns 0.
  1168. */
  1169. unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
  1170. {
  1171. unsigned long addr;
  1172. addr = kernel_stack_pointer(regs) + n * sizeof(long);
  1173. if (!regs_within_kernel_stack(regs, addr))
  1174. return 0;
  1175. return *(unsigned long *)addr;
  1176. }