/* arch/s390/kernel/ptrace.c */
  1. /*
  2. * Ptrace user space interface.
  3. *
  4. * Copyright IBM Corp. 1999,2010
  5. * Author(s): Denis Joseph Barrow
  6. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/sched.h>
  10. #include <linux/mm.h>
  11. #include <linux/smp.h>
  12. #include <linux/errno.h>
  13. #include <linux/ptrace.h>
  14. #include <linux/user.h>
  15. #include <linux/security.h>
  16. #include <linux/audit.h>
  17. #include <linux/signal.h>
  18. #include <linux/elf.h>
  19. #include <linux/regset.h>
  20. #include <linux/tracehook.h>
  21. #include <linux/seccomp.h>
  22. #include <trace/syscall.h>
  23. #include <asm/compat.h>
  24. #include <asm/segment.h>
  25. #include <asm/page.h>
  26. #include <asm/pgtable.h>
  27. #include <asm/pgalloc.h>
  28. #include <asm/system.h>
  29. #include <asm/uaccess.h>
  30. #include <asm/unistd.h>
  31. #include "entry.h"
  32. #ifdef CONFIG_COMPAT
  33. #include "compat_ptrace.h"
  34. #endif
  35. #define CREATE_TRACE_POINTS
  36. #include <trace/events/syscalls.h>
/* Index of each user_regset exported for s390 core dumps / PTRACE_GETREGSET. */
enum s390_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_LAST_BREAK,
	REGSET_SYSTEM_CALL,
	REGSET_GENERAL_EXTENDED,
};
/*
 * Recompute the effective PER (program-event-recording) setup of @task:
 * merge the debugger-specified PER registers with the single-step
 * override, maintain the PER bit in the PSW, and reload control
 * registers 9-11 when the effective values changed.
 */
void update_per_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;

	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
		/* Instruction-fetch events over the entire address
		 * range implement single stepping. */
		new.control |= PER_EVENT_IFETCH;
		new.start = 0;
		new.end = PSW_ADDR_INSN;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		/* No events requested: disable PER and skip the
		 * control register update entirely. */
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	/* Avoid the (expensive) control register load if nothing changed. */
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
  69. void user_enable_single_step(struct task_struct *task)
  70. {
  71. set_tsk_thread_flag(task, TIF_SINGLE_STEP);
  72. if (task == current)
  73. update_per_regs(task);
  74. }
  75. void user_disable_single_step(struct task_struct *task)
  76. {
  77. clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
  78. if (task == current)
  79. update_per_regs(task);
  80. }
  81. /*
  82. * Called by kernel/ptrace.c when detaching..
  83. *
  84. * Clear all debugging related fields.
  85. */
  86. void ptrace_disable(struct task_struct *task)
  87. {
  88. memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
  89. memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
  90. clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
  91. clear_tsk_thread_flag(task, TIF_PER_TRAP);
  92. }
/*
 * Low address bits that must be clear for an aligned user-area access:
 * word alignment (3) on 31 bit, doubleword alignment (7) on 64 bit.
 */
#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif
/*
 * Emulate a read from the per_struct area of struct user.
 * @addr is the byte offset into per_struct (computed via the NULL
 * "dummy" pointer so the field offsets come straight from the struct
 * layout). The "active" cr9-cr11/bits fields reflect the single-step
 * override while TIF_SINGLE_STEP is set; the remaining fields always
 * report what the debugger programmed or what the last PER trap stored.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap,
		 * left-justified in the returned word. */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap, left-justified. */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	/* Padding inside per_struct reads as zero. */
	return 0;
}
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask)
			/* Return a clean psw mask. */
			tmp = psw_user_bits | (tmp & PSW_MASK_USER) |
				PSW_MASK_EA | PSW_MASK_BA;
	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		/* Mask the fpc word down to its architecturally
		 * valid bits. */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp &= (unsigned long) FPC_VALID_MASK
				<< (BITS_PER_LONG - 32);
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);
	} else
		/* Anything past per_info reads as zero. */
		tmp = 0;

	return tmp;
}
/*
 * PTRACE_PEEKUSR: validate alignment and range of the user-area
 * offset, read the word and copy it out to the tracer at @data.
 * Returns -EIO on a bad offset, -EFAULT on a put_user fault, else 0.
 */
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	/* Allow 4-byte alignment inside the 32 bit acrs array. */
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}
/*
 * Emulate a write to the per_struct area of struct user.
 * @addr is the byte offset into per_struct, @data the value to store.
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set,
		 * restricted to the valid event/control bits. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register needs to get checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		/* Bits outside PSW_MASK_USER must match psw_user_bits;
		 * tmp is the xor of the fixed bits against the template. */
		tmp = (data & ~PSW_MASK_USER) ^ psw_user_bits;
		if (addr == (addr_t) &dummy->regs.psw.mask &&
#ifdef CONFIG_COMPAT
		    tmp != PSW_MASK_BA &&
#endif
		    tmp != (PSW_MASK_EA | PSW_MASK_BA))
			/* Invalid psw mask. */
			return -EINVAL;
#ifndef CONFIG_64BIT
		if (addr == (addr_t) &dummy->regs.psw.addr)
			/* I'd like to reject addresses without the
			   high order bit but older gdb's rely on it */
			data |= PSW_ADDR_AMODE;
#endif
		if (addr == (addr_t) &dummy->regs.psw.addr)
			/*
			 * The debugger changed the instruction address,
			 * reset system call restart, see signal.c:do_signal
			 */
			task_thread_info(child)->system_call = 0;
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		/* Reject an fpc value with bits outside FPC_VALID_MASK
		 * (the fpc occupies the upper 32 bit of the word). */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
		    (data & ~((unsigned long) FPC_VALID_MASK
			      << (BITS_PER_LONG - 32))) != 0)
			return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);
	}

	return 0;
}
/*
 * PTRACE_POKEUSR: validate alignment and range of the user-area
 * offset, then store the word. Returns -EIO on a bad offset,
 * otherwise the result of __poke_user().
 */
static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	/* Allow 4-byte alignment inside the 32 bit acrs array. */
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}
  350. long arch_ptrace(struct task_struct *child, long request,
  351. unsigned long addr, unsigned long data)
  352. {
  353. ptrace_area parea;
  354. int copied, ret;
  355. switch (request) {
  356. case PTRACE_PEEKUSR:
  357. /* read the word at location addr in the USER area. */
  358. return peek_user(child, addr, data);
  359. case PTRACE_POKEUSR:
  360. /* write the word at location addr in the USER area */
  361. return poke_user(child, addr, data);
  362. case PTRACE_PEEKUSR_AREA:
  363. case PTRACE_POKEUSR_AREA:
  364. if (copy_from_user(&parea, (void __force __user *) addr,
  365. sizeof(parea)))
  366. return -EFAULT;
  367. addr = parea.kernel_addr;
  368. data = parea.process_addr;
  369. copied = 0;
  370. while (copied < parea.len) {
  371. if (request == PTRACE_PEEKUSR_AREA)
  372. ret = peek_user(child, addr, data);
  373. else {
  374. addr_t utmp;
  375. if (get_user(utmp,
  376. (addr_t __force __user *) data))
  377. return -EFAULT;
  378. ret = poke_user(child, addr, utmp);
  379. }
  380. if (ret)
  381. return ret;
  382. addr += sizeof(unsigned long);
  383. data += sizeof(unsigned long);
  384. copied += sizeof(unsigned long);
  385. }
  386. return 0;
  387. case PTRACE_GET_LAST_BREAK:
  388. put_user(task_thread_info(child)->last_break,
  389. (unsigned long __user *) data);
  390. return 0;
  391. default:
  392. /* Removing high order bit from addr (only for 31 bit). */
  393. addr &= PSW_ADDR_INSN;
  394. return ptrace_request(child, request, addr, data);
  395. }
  396. }
  397. #ifdef CONFIG_COMPAT
  398. /*
  399. * Now the fun part starts... a 31 bit program running in the
  400. * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
  401. * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
  402. * to handle, the difference to the 64 bit versions of the requests
  403. * is that the access is done in multiples of 4 byte instead of
  404. * 8 bytes (sizeof(unsigned long) on 31/64 bit).
  405. * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
  406. * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
  407. * is a 31 bit program too, the content of struct user can be
  408. * emulated. A 31 bit program peeking into the struct user of
  409. * a 64 bit program is a no-no.
  410. */
  411. /*
  412. * Same as peek_user_per but for a 31 bit program.
  413. */
  414. static inline __u32 __peek_user_per_compat(struct task_struct *child,
  415. addr_t addr)
  416. {
  417. struct compat_per_struct_kernel *dummy32 = NULL;
  418. if (addr == (addr_t) &dummy32->cr9)
  419. /* Control bits of the active per set. */
  420. return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
  421. PER_EVENT_IFETCH : child->thread.per_user.control;
  422. else if (addr == (addr_t) &dummy32->cr10)
  423. /* Start address of the active per set. */
  424. return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
  425. 0 : child->thread.per_user.start;
  426. else if (addr == (addr_t) &dummy32->cr11)
  427. /* End address of the active per set. */
  428. return test_thread_flag(TIF_SINGLE_STEP) ?
  429. PSW32_ADDR_INSN : child->thread.per_user.end;
  430. else if (addr == (addr_t) &dummy32->bits)
  431. /* Single-step bit. */
  432. return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
  433. 0x80000000 : 0;
  434. else if (addr == (addr_t) &dummy32->starting_addr)
  435. /* Start address of the user specified per set. */
  436. return (__u32) child->thread.per_user.start;
  437. else if (addr == (addr_t) &dummy32->ending_addr)
  438. /* End address of the user specified per set. */
  439. return (__u32) child->thread.per_user.end;
  440. else if (addr == (addr_t) &dummy32->perc_atmid)
  441. /* PER code, ATMID and AI of the last PER trap */
  442. return (__u32) child->thread.per_event.cause << 16;
  443. else if (addr == (addr_t) &dummy32->address)
  444. /* Address of the last PER trap */
  445. return (__u32) child->thread.per_event.address;
  446. else if (addr == (addr_t) &dummy32->access_id)
  447. /* Access id of the last PER trap */
  448. return (__u32) child->thread.per_event.paid << 24;
  449. return 0;
  450. }
/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp = psw32_user_bits | (tmp & PSW32_MASK_USER);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr | PSW32_ADDR_AMODE;
		} else {
			/* gpr 0-15 */
			/* Each 64 bit gpr slot maps to one 32 bit compat
			 * slot; addr*2 + 4 presumably selects the low
			 * (big-endian: rightmost) half -- verify against
			 * the compat_user layout. */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);
	} else
		/* Anything past per_info reads as zero. */
		tmp = 0;

	return tmp;
}
  508. static int peek_user_compat(struct task_struct *child,
  509. addr_t addr, addr_t data)
  510. {
  511. __u32 tmp;
  512. if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
  513. return -EIO;
  514. tmp = __peek_user_compat(child, addr);
  515. return put_user(tmp, (__u32 __user *) data);
  516. }
/*
 * Same as poke_user_per but for a 31 bit program. Only the PER event
 * mask and the start/end addresses of the user specified per set are
 * writable; everything else is ignored.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set,
		 * restricted to the valid event/control bits. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
				/* Invalid psw mask. */
				return -EINVAL;
			/* Keep the 64 bit fixed bits, splice the user
			 * modifiable bits into the upper half. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(__u64)(tmp & PSW32_MASK_USER) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/*
			 * The debugger changed the instruction address,
			 * reset system call restart, see signal.c:do_signal
			 */
			task_thread_info(child)->system_call = 0;
		} else {
			/* gpr 0-15: write the low 32 bit half of the
			 * 64 bit gpr slot (see __peek_user_compat). */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    (tmp & ~FPC_VALID_MASK) != 0)
			/* Invalid floating point control. */
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		/* data is implicitly truncated to the __u32 parameter. */
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
  604. static int poke_user_compat(struct task_struct *child,
  605. addr_t addr, addr_t data)
  606. {
  607. if (!is_compat_task() || (addr & 3) ||
  608. addr > sizeof(struct compat_user) - 3)
  609. return -EIO;
  610. return __poke_user_compat(child, addr, data);
  611. }
  612. long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
  613. compat_ulong_t caddr, compat_ulong_t cdata)
  614. {
  615. unsigned long addr = caddr;
  616. unsigned long data = cdata;
  617. compat_ptrace_area parea;
  618. int copied, ret;
  619. switch (request) {
  620. case PTRACE_PEEKUSR:
  621. /* read the word at location addr in the USER area. */
  622. return peek_user_compat(child, addr, data);
  623. case PTRACE_POKEUSR:
  624. /* write the word at location addr in the USER area */
  625. return poke_user_compat(child, addr, data);
  626. case PTRACE_PEEKUSR_AREA:
  627. case PTRACE_POKEUSR_AREA:
  628. if (copy_from_user(&parea, (void __force __user *) addr,
  629. sizeof(parea)))
  630. return -EFAULT;
  631. addr = parea.kernel_addr;
  632. data = parea.process_addr;
  633. copied = 0;
  634. while (copied < parea.len) {
  635. if (request == PTRACE_PEEKUSR_AREA)
  636. ret = peek_user_compat(child, addr, data);
  637. else {
  638. __u32 utmp;
  639. if (get_user(utmp,
  640. (__u32 __force __user *) data))
  641. return -EFAULT;
  642. ret = poke_user_compat(child, addr, utmp);
  643. }
  644. if (ret)
  645. return ret;
  646. addr += sizeof(unsigned int);
  647. data += sizeof(unsigned int);
  648. copied += sizeof(unsigned int);
  649. }
  650. return 0;
  651. case PTRACE_GET_LAST_BREAK:
  652. put_user(task_thread_info(child)->last_break,
  653. (unsigned int __user *) data);
  654. return 0;
  655. }
  656. return compat_ptrace_request(child, request, addr, data);
  657. }
  658. #endif
/*
 * Syscall-entry tracing hook, called from entry.S when any tracing
 * work (TIF_SYSCALL_TRACE, tracepoints, audit) is pending.
 * Returns the system call number to execute, or -1 to skip the call.
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Do the secure computing check first. */
	secure_computing(regs->gprs[2]);

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_thread_flag(TIF_SYSCALL);
		ret = -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	if (unlikely(current->audit_context))
		audit_syscall_entry(is_compat_task() ?
					AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
				    regs->gprs[2], regs->orig_gpr2,
				    regs->gprs[3], regs->gprs[4],
				    regs->gprs[5]);

	/* ret != 0 (i.e. -1) skips the syscall; 0 runs gprs[2]. */
	return ret ?: regs->gprs[2];
}
/*
 * Syscall-exit tracing hook, called from entry.S: report the result
 * (in gprs[2]) to audit, the exit tracepoint and the tracer, in that
 * order.
 */
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
				   regs->gprs[2]);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
  699. /*
  700. * user_regset definitions.
  701. */
  702. static int s390_regs_get(struct task_struct *target,
  703. const struct user_regset *regset,
  704. unsigned int pos, unsigned int count,
  705. void *kbuf, void __user *ubuf)
  706. {
  707. if (target == current)
  708. save_access_regs(target->thread.acrs);
  709. if (kbuf) {
  710. unsigned long *k = kbuf;
  711. while (count > 0) {
  712. *k++ = __peek_user(target, pos);
  713. count -= sizeof(*k);
  714. pos += sizeof(*k);
  715. }
  716. } else {
  717. unsigned long __user *u = ubuf;
  718. while (count > 0) {
  719. if (__put_user(__peek_user(target, pos), u++))
  720. return -EFAULT;
  721. count -= sizeof(*u);
  722. pos += sizeof(*u);
  723. }
  724. }
  725. return 0;
  726. }
/*
 * Regset "set" for the general registers: stream words from the
 * kernel or user buffer into the user area via __poke_user(), and
 * reload the access registers if the current task was modified.
 */
static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	/* Make sure the in-memory access registers are current. */
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	/* Put possibly modified access registers back into effect. */
	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
/*
 * Regset "get" for the floating point registers: refresh the saved
 * state from the hardware for the current task, then copy out the
 * whole s390_fp_regs structure.
 */
static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}
/*
 * Write the floating point register set of @target.  The FP control
 * word (fpc), which precedes the fprs in s390_fp_regs, is validated
 * against FPC_VALID_MASK before it is accepted; the fprs themselves
 * are copied through unchecked.  On success for the current task the
 * new state is loaded back into the CPU registers.
 */
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		/* fpc[1] is the pad word after the fpc; it must stay zero. */
		u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
			return -EINVAL;
		target->thread.fp_regs.fpc = fpc[0];
	}

	/* Remaining bytes (if any) are the floating point registers. */
	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);
	if (rc == 0 && target == current)
		restore_fp_regs(&target->thread.fp_regs);
	return rc;
}
  794. #ifdef CONFIG_64BIT
  795. static int s390_last_break_get(struct task_struct *target,
  796. const struct user_regset *regset,
  797. unsigned int pos, unsigned int count,
  798. void *kbuf, void __user *ubuf)
  799. {
  800. if (count > 0) {
  801. if (kbuf) {
  802. unsigned long *k = kbuf;
  803. *k = task_thread_info(target)->last_break;
  804. } else {
  805. unsigned long __user *u = ubuf;
  806. if (__put_user(task_thread_info(target)->last_break, u))
  807. return -EFAULT;
  808. }
  809. }
  810. return 0;
  811. }
  812. #endif
  813. static int s390_system_call_get(struct task_struct *target,
  814. const struct user_regset *regset,
  815. unsigned int pos, unsigned int count,
  816. void *kbuf, void __user *ubuf)
  817. {
  818. unsigned int *data = &task_thread_info(target)->system_call;
  819. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  820. data, 0, sizeof(unsigned int));
  821. }
  822. static int s390_system_call_set(struct task_struct *target,
  823. const struct user_regset *regset,
  824. unsigned int pos, unsigned int count,
  825. const void *kbuf, const void __user *ubuf)
  826. {
  827. unsigned int *data = &task_thread_info(target)->system_call;
  828. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  829. data, 0, sizeof(unsigned int));
  830. }
/*
 * Native regsets, exported via PTRACE_GETREGSET/PTRACE_SETREGSET and
 * the corresponding ELF core dump notes.
 */
static const struct user_regset s390_regsets[] = {
	/* General purpose registers, psw and access registers. */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	/* Floating point control word and registers. */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
#ifdef CONFIG_64BIT
	/* Last breaking event address; read-only (no .set). */
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
	},
#endif
	/* Interrupted system call number. */
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
};
/* Regset view for native tasks, returned by task_user_regset_view(). */
static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
  872. #ifdef CONFIG_COMPAT
  873. static int s390_compat_regs_get(struct task_struct *target,
  874. const struct user_regset *regset,
  875. unsigned int pos, unsigned int count,
  876. void *kbuf, void __user *ubuf)
  877. {
  878. if (target == current)
  879. save_access_regs(target->thread.acrs);
  880. if (kbuf) {
  881. compat_ulong_t *k = kbuf;
  882. while (count > 0) {
  883. *k++ = __peek_user_compat(target, pos);
  884. count -= sizeof(*k);
  885. pos += sizeof(*k);
  886. }
  887. } else {
  888. compat_ulong_t __user *u = ubuf;
  889. while (count > 0) {
  890. if (__put_user(__peek_user_compat(target, pos), u++))
  891. return -EFAULT;
  892. count -= sizeof(*u);
  893. pos += sizeof(*u);
  894. }
  895. }
  896. return 0;
  897. }
  898. static int s390_compat_regs_set(struct task_struct *target,
  899. const struct user_regset *regset,
  900. unsigned int pos, unsigned int count,
  901. const void *kbuf, const void __user *ubuf)
  902. {
  903. int rc = 0;
  904. if (target == current)
  905. save_access_regs(target->thread.acrs);
  906. if (kbuf) {
  907. const compat_ulong_t *k = kbuf;
  908. while (count > 0 && !rc) {
  909. rc = __poke_user_compat(target, pos, *k++);
  910. count -= sizeof(*k);
  911. pos += sizeof(*k);
  912. }
  913. } else {
  914. const compat_ulong_t __user *u = ubuf;
  915. while (count > 0 && !rc) {
  916. compat_ulong_t word;
  917. rc = __get_user(word, u++);
  918. if (rc)
  919. break;
  920. rc = __poke_user_compat(target, pos, word);
  921. count -= sizeof(*u);
  922. pos += sizeof(*u);
  923. }
  924. }
  925. if (rc == 0 && target == current)
  926. restore_access_regs(target->thread.acrs);
  927. return rc;
  928. }
/*
 * Read the upper 32 bits of the 64-bit gprs of a 31-bit (compat) task.
 * gprs_high points at the first 32-bit half of a 64-bit gpr slot in
 * pt_regs and is advanced by two compat words (i.e. one full gpr) per
 * iteration, so only the high halves are collected.
 * NOTE(review): this relies on the high half being the first word of
 * each 64-bit slot (big-endian layout) — holds on s390.
 */
static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}
	return 0;
}
  955. static int s390_compat_regs_high_set(struct task_struct *target,
  956. const struct user_regset *regset,
  957. unsigned int pos, unsigned int count,
  958. const void *kbuf, const void __user *ubuf)
  959. {
  960. compat_ulong_t *gprs_high;
  961. int rc = 0;
  962. gprs_high = (compat_ulong_t *)
  963. &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
  964. if (kbuf) {
  965. const compat_ulong_t *k = kbuf;
  966. while (count > 0) {
  967. *gprs_high = *k++;
  968. *gprs_high += 2;
  969. count -= sizeof(*k);
  970. }
  971. } else {
  972. const compat_ulong_t __user *u = ubuf;
  973. while (count > 0 && !rc) {
  974. unsigned long word;
  975. rc = __get_user(word, u++);
  976. if (rc)
  977. break;
  978. *gprs_high = word;
  979. *gprs_high += 2;
  980. count -= sizeof(*u);
  981. }
  982. }
  983. return rc;
  984. }
  985. static int s390_compat_last_break_get(struct task_struct *target,
  986. const struct user_regset *regset,
  987. unsigned int pos, unsigned int count,
  988. void *kbuf, void __user *ubuf)
  989. {
  990. compat_ulong_t last_break;
  991. if (count > 0) {
  992. last_break = task_thread_info(target)->last_break;
  993. if (kbuf) {
  994. unsigned long *k = kbuf;
  995. *k = last_break;
  996. } else {
  997. unsigned long __user *u = ubuf;
  998. if (__put_user(last_break, u))
  999. return -EFAULT;
  1000. }
  1001. }
  1002. return 0;
  1003. }
/*
 * Regsets for 31-bit (compat) tasks; element sizes use the compat
 * (32-bit) types.
 */
static const struct user_regset s390_compat_regsets[] = {
	/* General purpose registers, psw and access registers (31-bit). */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	/* FP layout is identical for compat; native handlers are reused. */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	/* Last breaking event address; read-only (no .set). */
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
	},
	/* Interrupted system call number; native handlers are reused. */
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	/* Upper halves of the 64-bit gprs of a compat task. */
	[REGSET_GENERAL_EXTENDED] = {
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};
/* Regset view for 31-bit (compat) tasks. */
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
  1051. #endif
/*
 * Select the regset view matching the tracee's personality: the compat
 * (31-bit) view for TIF_31BIT tasks, the native view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
/* Names accepted/returned by regs_query_register_offset()/_name(). */
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
  1064. unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
  1065. {
  1066. if (offset >= NUM_GPRS)
  1067. return 0;
  1068. return regs->gprs[offset];
  1069. }
  1070. int regs_query_register_offset(const char *name)
  1071. {
  1072. unsigned long offset;
  1073. if (!name || *name != 'r')
  1074. return -EINVAL;
  1075. if (strict_strtoul(name + 1, 10, &offset))
  1076. return -EINVAL;
  1077. if (offset >= NUM_GPRS)
  1078. return -EINVAL;
  1079. return offset;
  1080. }
  1081. const char *regs_query_register_name(unsigned int offset)
  1082. {
  1083. if (offset >= NUM_GPRS)
  1084. return NULL;
  1085. return gpr_names[offset];
  1086. }
/*
 * Return true if @addr lies within the same THREAD_SIZE-aligned kernel
 * stack area as the stack pointer recorded in @regs.
 */
static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);
	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}
  1092. /**
  1093. * regs_get_kernel_stack_nth() - get Nth entry of the stack
  1094. * @regs:pt_regs which contains kernel stack pointer.
  1095. * @n:stack entry number.
  1096. *
  1097. * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
  1099. * this returns 0.
  1100. */
  1101. unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
  1102. {
  1103. unsigned long addr;
  1104. addr = kernel_stack_pointer(regs) + n * sizeof(long);
  1105. if (!regs_within_kernel_stack(regs, addr))
  1106. return 0;
  1107. return *(unsigned long *)addr;
  1108. }