ptrace.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340
  1. /*
  2. * Ptrace user space interface.
  3. *
  4. * Copyright IBM Corp. 1999, 2010
  5. * Author(s): Denis Joseph Barrow
  6. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/sched.h>
  10. #include <linux/mm.h>
  11. #include <linux/smp.h>
  12. #include <linux/errno.h>
  13. #include <linux/ptrace.h>
  14. #include <linux/user.h>
  15. #include <linux/security.h>
  16. #include <linux/audit.h>
  17. #include <linux/signal.h>
  18. #include <linux/elf.h>
  19. #include <linux/regset.h>
  20. #include <linux/tracehook.h>
  21. #include <linux/seccomp.h>
  22. #include <linux/compat.h>
  23. #include <trace/syscall.h>
  24. #include <asm/segment.h>
  25. #include <asm/page.h>
  26. #include <asm/pgtable.h>
  27. #include <asm/pgalloc.h>
  28. #include <asm/uaccess.h>
  29. #include <asm/unistd.h>
  30. #include <asm/switch_to.h>
  31. #include "entry.h"
  32. #ifdef CONFIG_COMPAT
  33. #include "compat_ptrace.h"
  34. #endif
  35. #define CREATE_TRACE_POINTS
  36. #include <trace/events/syscalls.h>
/*
 * Indices of the user_regset flavors exported by this architecture;
 * see the regset definitions further down in this file.
 */
enum s390_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_LAST_BREAK,
	REGSET_TDB,
	REGSET_SYSTEM_CALL,
	REGSET_GENERAL_EXTENDED,
};
/*
 * Recompute control registers 0, 2 and 9-11 for @task from its ptrace
 * state: the transactional-execution flags set via PTRACE_ENABLE_TE /
 * PTRACE_DISABLE_TE / PTRACE_TE_ABORT_RAND, and the user PER range
 * merged with TIF_SINGLE_STEP. Loads the hardware control registers
 * directly, so this is meant to run for the current task (callers
 * below check task == current) or on the context-switch path.
 */
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;

#ifdef CONFIG_64BIT
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		unsigned long cr[3], cr_new[3];

		__ctl_store(cr, 0, 2);
		/* CR1 is not affected by ptrace, keep the current value. */
		cr_new[1] = cr[1];
		/* Set or clear transaction execution TXC/PIFO bits 8 and 9. */
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr_new[0] = cr[0] & ~(3UL << 54);
		else
			cr_new[0] = cr[0] | (3UL << 54);
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr_new[2] = cr[2] & ~3UL;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			/* TDC encoding selected by PTRACE_TE_ABORT_RAND mode. */
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr_new[2] |= 1UL;
			else
				cr_new[2] |= 2UL;
		}
		/* Reload CR0-CR2 only if something actually changed. */
		if (memcmp(&cr_new, &cr, sizeof(cr)))
			__ctl_load(cr_new, 0, 2);
	}
#endif
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
		/* Trap on every instruction fetch over the whole range. */
		new.control |= PER_EVENT_IFETCH;
#ifdef CONFIG_64BIT
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
#endif
		new.start = 0;
		new.end = PSW_ADDR_INSN;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		/* No PER event requested - disable PER in the PSW. */
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	/* Reload CR9-CR11 only if the PER set changed. */
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
  97. void user_enable_single_step(struct task_struct *task)
  98. {
  99. set_tsk_thread_flag(task, TIF_SINGLE_STEP);
  100. if (task == current)
  101. update_cr_regs(task);
  102. }
  103. void user_disable_single_step(struct task_struct *task)
  104. {
  105. clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
  106. if (task == current)
  107. update_cr_regs(task);
  108. }
  109. /*
  110. * Called by kernel/ptrace.c when detaching..
  111. *
  112. * Clear all debugging related fields.
  113. */
  114. void ptrace_disable(struct task_struct *task)
  115. {
  116. memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
  117. memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
  118. clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
  119. clear_tsk_thread_flag(task, TIF_PER_TRAP);
  120. task->thread.per_flags = 0;
  121. }
/* Word-alignment mask for user-area offsets: sizeof(long) - 1. */
#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif
  127. static inline unsigned long __peek_user_per(struct task_struct *child,
  128. addr_t addr)
  129. {
  130. struct per_struct_kernel *dummy = NULL;
  131. if (addr == (addr_t) &dummy->cr9)
  132. /* Control bits of the active per set. */
  133. return test_thread_flag(TIF_SINGLE_STEP) ?
  134. PER_EVENT_IFETCH : child->thread.per_user.control;
  135. else if (addr == (addr_t) &dummy->cr10)
  136. /* Start address of the active per set. */
  137. return test_thread_flag(TIF_SINGLE_STEP) ?
  138. 0 : child->thread.per_user.start;
  139. else if (addr == (addr_t) &dummy->cr11)
  140. /* End address of the active per set. */
  141. return test_thread_flag(TIF_SINGLE_STEP) ?
  142. PSW_ADDR_INSN : child->thread.per_user.end;
  143. else if (addr == (addr_t) &dummy->bits)
  144. /* Single-step bit. */
  145. return test_thread_flag(TIF_SINGLE_STEP) ?
  146. (1UL << (BITS_PER_LONG - 1)) : 0;
  147. else if (addr == (addr_t) &dummy->starting_addr)
  148. /* Start address of the user specified per set. */
  149. return child->thread.per_user.start;
  150. else if (addr == (addr_t) &dummy->ending_addr)
  151. /* End address of the user specified per set. */
  152. return child->thread.per_user.end;
  153. else if (addr == (addr_t) &dummy->perc_atmid)
  154. /* PER code, ATMID and AI of the last PER trap */
  155. return (unsigned long)
  156. child->thread.per_event.cause << (BITS_PER_LONG - 16);
  157. else if (addr == (addr_t) &dummy->address)
  158. /* Address of the last PER trap */
  159. return child->thread.per_event.address;
  160. else if (addr == (addr_t) &dummy->access_id)
  161. /* Access id of the last PER trap */
  162. return (unsigned long)
  163. child->thread.per_event.paid << (BITS_PER_LONG - 8);
  164. return 0;
  165. }
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	/* NULL base pointer used only for offsetof-style arithmetic. */
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask)
			/* Return a clean psw mask. */
			tmp = psw_user_bits | (tmp & PSW_MASK_USER);
	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			/* Keep only the valid fpc bits (upper word). */
			tmp &= (unsigned long) FPC_VALID_MASK
				<< (BITS_PER_LONG - 32);
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);
	} else
		tmp = 0;

	return tmp;
}
  233. static int
  234. peek_user(struct task_struct *child, addr_t addr, addr_t data)
  235. {
  236. addr_t tmp, mask;
  237. /*
  238. * Stupid gdb peeks/pokes the access registers in 64 bit with
  239. * an alignment of 4. Programmers from hell...
  240. */
  241. mask = __ADDR_MASK;
  242. #ifdef CONFIG_64BIT
  243. if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
  244. addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
  245. mask = 3;
  246. #endif
  247. if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
  248. return -EIO;
  249. tmp = __peek_user(child, addr);
  250. return put_user(tmp, (addr_t __user *) data);
  251. }
  252. static inline void __poke_user_per(struct task_struct *child,
  253. addr_t addr, addr_t data)
  254. {
  255. struct per_struct_kernel *dummy = NULL;
  256. /*
  257. * There are only three fields in the per_info struct that the
  258. * debugger user can write to.
  259. * 1) cr9: the debugger wants to set a new PER event mask
  260. * 2) starting_addr: the debugger wants to set a new starting
  261. * address to use with the PER event mask.
  262. * 3) ending_addr: the debugger wants to set a new ending
  263. * address to use with the PER event mask.
  264. * The user specified PER event mask and the start and end
  265. * addresses are used only if single stepping is not in effect.
  266. * Writes to any other field in per_info are ignored.
  267. */
  268. if (addr == (addr_t) &dummy->cr9)
  269. /* PER event mask of the user specified per set. */
  270. child->thread.per_user.control =
  271. data & (PER_EVENT_MASK | PER_CONTROL_MASK);
  272. else if (addr == (addr_t) &dummy->starting_addr)
  273. /* Starting address of the user specified per set. */
  274. child->thread.per_user.start = data;
  275. else if (addr == (addr_t) &dummy->ending_addr)
  276. /* Ending address of the user specified per set. */
  277. child->thread.per_user.end = data;
  278. }
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register needs to get checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	/* NULL base pointer used only for offsetof-style arithmetic. */
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask &&
		    ((data & ~PSW_MASK_USER) != psw_user_bits ||
		     ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
			/* Invalid psw mask. */
			return -EINVAL;
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
		    (data & ~((unsigned long) FPC_VALID_MASK
			      << (BITS_PER_LONG - 32))) != 0)
			/* Bits outside the (left-aligned) valid fpc mask. */
			return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);
	}

	return 0;
}
  346. static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
  347. {
  348. addr_t mask;
  349. /*
  350. * Stupid gdb peeks/pokes the access registers in 64 bit with
  351. * an alignment of 4. Programmers from hell indeed...
  352. */
  353. mask = __ADDR_MASK;
  354. #ifdef CONFIG_64BIT
  355. if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
  356. addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
  357. mask = 3;
  358. #endif
  359. if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
  360. return -EIO;
  361. return __poke_user(child, addr, data);
  362. }
  363. long arch_ptrace(struct task_struct *child, long request,
  364. unsigned long addr, unsigned long data)
  365. {
  366. ptrace_area parea;
  367. int copied, ret;
  368. switch (request) {
  369. case PTRACE_PEEKUSR:
  370. /* read the word at location addr in the USER area. */
  371. return peek_user(child, addr, data);
  372. case PTRACE_POKEUSR:
  373. /* write the word at location addr in the USER area */
  374. return poke_user(child, addr, data);
  375. case PTRACE_PEEKUSR_AREA:
  376. case PTRACE_POKEUSR_AREA:
  377. if (copy_from_user(&parea, (void __force __user *) addr,
  378. sizeof(parea)))
  379. return -EFAULT;
  380. addr = parea.kernel_addr;
  381. data = parea.process_addr;
  382. copied = 0;
  383. while (copied < parea.len) {
  384. if (request == PTRACE_PEEKUSR_AREA)
  385. ret = peek_user(child, addr, data);
  386. else {
  387. addr_t utmp;
  388. if (get_user(utmp,
  389. (addr_t __force __user *) data))
  390. return -EFAULT;
  391. ret = poke_user(child, addr, utmp);
  392. }
  393. if (ret)
  394. return ret;
  395. addr += sizeof(unsigned long);
  396. data += sizeof(unsigned long);
  397. copied += sizeof(unsigned long);
  398. }
  399. return 0;
  400. case PTRACE_GET_LAST_BREAK:
  401. put_user(task_thread_info(child)->last_break,
  402. (unsigned long __user *) data);
  403. return 0;
  404. case PTRACE_ENABLE_TE:
  405. if (!MACHINE_HAS_TE)
  406. return -EIO;
  407. child->thread.per_flags &= ~PER_FLAG_NO_TE;
  408. return 0;
  409. case PTRACE_DISABLE_TE:
  410. if (!MACHINE_HAS_TE)
  411. return -EIO;
  412. child->thread.per_flags |= PER_FLAG_NO_TE;
  413. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
  414. return 0;
  415. case PTRACE_TE_ABORT_RAND:
  416. if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
  417. return -EIO;
  418. switch (data) {
  419. case 0UL:
  420. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
  421. break;
  422. case 1UL:
  423. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
  424. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
  425. break;
  426. case 2UL:
  427. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
  428. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
  429. break;
  430. default:
  431. return -EINVAL;
  432. }
  433. return 0;
  434. default:
  435. /* Removing high order bit from addr (only for 31 bit). */
  436. addr &= PSW_ADDR_INSN;
  437. return ptrace_request(child, request, addr, data);
  438. }
  439. }
  440. #ifdef CONFIG_COMPAT
  441. /*
  442. * Now the fun part starts... a 31 bit program running in the
  443. * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
  444. * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
  445. * to handle, the difference to the 64 bit versions of the requests
  446. * is that the access is done in multiples of 4 byte instead of
  447. * 8 bytes (sizeof(unsigned long) on 31/64 bit).
  448. * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
  449. * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
  450. * is a 31 bit program too, the content of struct user can be
  451. * emulated. A 31 bit program peeking into the struct user of
  452. * a 64 bit program is a no-no.
  453. */
  454. /*
  455. * Same as peek_user_per but for a 31 bit program.
  456. */
  457. static inline __u32 __peek_user_per_compat(struct task_struct *child,
  458. addr_t addr)
  459. {
  460. struct compat_per_struct_kernel *dummy32 = NULL;
  461. if (addr == (addr_t) &dummy32->cr9)
  462. /* Control bits of the active per set. */
  463. return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
  464. PER_EVENT_IFETCH : child->thread.per_user.control;
  465. else if (addr == (addr_t) &dummy32->cr10)
  466. /* Start address of the active per set. */
  467. return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
  468. 0 : child->thread.per_user.start;
  469. else if (addr == (addr_t) &dummy32->cr11)
  470. /* End address of the active per set. */
  471. return test_thread_flag(TIF_SINGLE_STEP) ?
  472. PSW32_ADDR_INSN : child->thread.per_user.end;
  473. else if (addr == (addr_t) &dummy32->bits)
  474. /* Single-step bit. */
  475. return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
  476. 0x80000000 : 0;
  477. else if (addr == (addr_t) &dummy32->starting_addr)
  478. /* Start address of the user specified per set. */
  479. return (__u32) child->thread.per_user.start;
  480. else if (addr == (addr_t) &dummy32->ending_addr)
  481. /* End address of the user specified per set. */
  482. return (__u32) child->thread.per_user.end;
  483. else if (addr == (addr_t) &dummy32->perc_atmid)
  484. /* PER code, ATMID and AI of the last PER trap */
  485. return (__u32) child->thread.per_event.cause << 16;
  486. else if (addr == (addr_t) &dummy32->address)
  487. /* Address of the last PER trap */
  488. return (__u32) child->thread.per_event.address;
  489. else if (addr == (addr_t) &dummy32->access_id)
  490. /* Access id of the last PER trap */
  491. return (__u32) child->thread.per_event.paid << 24;
  492. return 0;
  493. }
/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	/* NULL base pointer used only for offsetof-style arithmetic. */
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp = psw32_user_bits | (tmp & PSW32_MASK_USER);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/*
			 * gpr 0-15: each 4 byte compat slot maps to the
			 * low half of the 64 bit gpr, hence addr*2 + 4.
			 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);
	} else
		tmp = 0;

	return tmp;
}
  552. static int peek_user_compat(struct task_struct *child,
  553. addr_t addr, addr_t data)
  554. {
  555. __u32 tmp;
  556. if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
  557. return -EIO;
  558. tmp = __peek_user_compat(child, addr);
  559. return put_user(tmp, (__u32 __user *) data);
  560. }
  561. /*
  562. * Same as poke_user_per but for a 31 bit program.
  563. */
  564. static inline void __poke_user_per_compat(struct task_struct *child,
  565. addr_t addr, __u32 data)
  566. {
  567. struct compat_per_struct_kernel *dummy32 = NULL;
  568. if (addr == (addr_t) &dummy32->cr9)
  569. /* PER event mask of the user specified per set. */
  570. child->thread.per_user.control =
  571. data & (PER_EVENT_MASK | PER_CONTROL_MASK);
  572. else if (addr == (addr_t) &dummy32->starting_addr)
  573. /* Starting address of the user specified per set. */
  574. child->thread.per_user.start = data;
  575. else if (addr == (addr_t) &dummy32->ending_addr)
  576. /* Ending address of the user specified per set. */
  577. child->thread.per_user.end = data;
  578. }
/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	/* NULL base pointer used only for offsetof-style arithmetic. */
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
				/* Invalid psw mask. */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & PSW32_MASK_USER) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/* gpr 0-15: low half of the 64 bit gpr (addr*2 + 4). */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    (tmp & ~FPC_VALID_MASK) != 0)
			/* Invalid floating point control. */
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 * (data is implicitly truncated to the __u32 parameter.)
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
  647. static int poke_user_compat(struct task_struct *child,
  648. addr_t addr, addr_t data)
  649. {
  650. if (!is_compat_task() || (addr & 3) ||
  651. addr > sizeof(struct compat_user) - 3)
  652. return -EIO;
  653. return __poke_user_compat(child, addr, data);
  654. }
  655. long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
  656. compat_ulong_t caddr, compat_ulong_t cdata)
  657. {
  658. unsigned long addr = caddr;
  659. unsigned long data = cdata;
  660. compat_ptrace_area parea;
  661. int copied, ret;
  662. switch (request) {
  663. case PTRACE_PEEKUSR:
  664. /* read the word at location addr in the USER area. */
  665. return peek_user_compat(child, addr, data);
  666. case PTRACE_POKEUSR:
  667. /* write the word at location addr in the USER area */
  668. return poke_user_compat(child, addr, data);
  669. case PTRACE_PEEKUSR_AREA:
  670. case PTRACE_POKEUSR_AREA:
  671. if (copy_from_user(&parea, (void __force __user *) addr,
  672. sizeof(parea)))
  673. return -EFAULT;
  674. addr = parea.kernel_addr;
  675. data = parea.process_addr;
  676. copied = 0;
  677. while (copied < parea.len) {
  678. if (request == PTRACE_PEEKUSR_AREA)
  679. ret = peek_user_compat(child, addr, data);
  680. else {
  681. __u32 utmp;
  682. if (get_user(utmp,
  683. (__u32 __force __user *) data))
  684. return -EFAULT;
  685. ret = poke_user_compat(child, addr, utmp);
  686. }
  687. if (ret)
  688. return ret;
  689. addr += sizeof(unsigned int);
  690. data += sizeof(unsigned int);
  691. copied += sizeof(unsigned int);
  692. }
  693. return 0;
  694. case PTRACE_GET_LAST_BREAK:
  695. put_user(task_thread_info(child)->last_break,
  696. (unsigned int __user *) data);
  697. return 0;
  698. }
  699. return compat_ptrace_request(child, request, addr, data);
  700. }
  701. #endif
/*
 * Called from the sysc_tracesys path in entry.S before a traced
 * system call executes. Returns the (possibly modified) system call
 * number from gprs[2], or -1 if the system call is to be skipped.
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Do the secure computing check first. */
	if (secure_computing(regs->gprs[2])) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1;
		goto out;
	}

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_thread_flag(TIF_SYSCALL);
		ret = -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	audit_syscall_entry(is_compat_task() ?
				AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
			    regs->gprs[2], regs->orig_gpr2,
			    regs->gprs[3], regs->gprs[4],
			    regs->gprs[5]);
out:
	/* GNU ?: - return the syscall number unless it was vetoed. */
	return ret ?: regs->gprs[2];
}
/*
 * Called on return from a traced system call: report the result to
 * audit, the syscall-exit tracepoint and the ptrace/tracehook layer,
 * in that order.
 */
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
  744. /*
  745. * user_regset definitions.
  746. */
  747. static int s390_regs_get(struct task_struct *target,
  748. const struct user_regset *regset,
  749. unsigned int pos, unsigned int count,
  750. void *kbuf, void __user *ubuf)
  751. {
  752. if (target == current)
  753. save_access_regs(target->thread.acrs);
  754. if (kbuf) {
  755. unsigned long *k = kbuf;
  756. while (count > 0) {
  757. *k++ = __peek_user(target, pos);
  758. count -= sizeof(*k);
  759. pos += sizeof(*k);
  760. }
  761. } else {
  762. unsigned long __user *u = ubuf;
  763. while (count > 0) {
  764. if (__put_user(__peek_user(target, pos), u++))
  765. return -EFAULT;
  766. count -= sizeof(*u);
  767. pos += sizeof(*u);
  768. }
  769. }
  770. return 0;
  771. }
  772. static int s390_regs_set(struct task_struct *target,
  773. const struct user_regset *regset,
  774. unsigned int pos, unsigned int count,
  775. const void *kbuf, const void __user *ubuf)
  776. {
  777. int rc = 0;
  778. if (target == current)
  779. save_access_regs(target->thread.acrs);
  780. if (kbuf) {
  781. const unsigned long *k = kbuf;
  782. while (count > 0 && !rc) {
  783. rc = __poke_user(target, pos, *k++);
  784. count -= sizeof(*k);
  785. pos += sizeof(*k);
  786. }
  787. } else {
  788. const unsigned long __user *u = ubuf;
  789. while (count > 0 && !rc) {
  790. unsigned long word;
  791. rc = __get_user(word, u++);
  792. if (rc)
  793. break;
  794. rc = __poke_user(target, pos, word);
  795. count -= sizeof(*u);
  796. pos += sizeof(*u);
  797. }
  798. }
  799. if (rc == 0 && target == current)
  800. restore_access_regs(target->thread.acrs);
  801. return rc;
  802. }
  803. static int s390_fpregs_get(struct task_struct *target,
  804. const struct user_regset *regset, unsigned int pos,
  805. unsigned int count, void *kbuf, void __user *ubuf)
  806. {
  807. if (target == current)
  808. save_fp_regs(&target->thread.fp_regs);
  809. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  810. &target->thread.fp_regs, 0, -1);
  811. }
/*
 * Regset write of the floating point registers.  The floating point
 * control (fpc) word at the start of the layout is validated before it
 * is accepted; the fprs that follow are copied in unchecked.
 */
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	/* Bring thread.fp_regs up to date before merging partial writes. */
	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		/*
		 * fpc[0] seeds the current value so a partial copyin merges
		 * correctly; fpc[1] covers the remaining bytes before fprs
		 * and must stay zero (enforced below).
		 */
		u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		/* Reject reserved fpc bits and non-zero pad bytes. */
		if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
			return -EINVAL;
		target->thread.fp_regs.fpc = fpc[0];
	}

	/* Copy in the floating point registers proper. */
	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);
	/* Load the new state into hardware for the running task. */
	if (rc == 0 && target == current)
		restore_fp_regs(&target->thread.fp_regs);
	return rc;
}
  839. #ifdef CONFIG_64BIT
  840. static int s390_last_break_get(struct task_struct *target,
  841. const struct user_regset *regset,
  842. unsigned int pos, unsigned int count,
  843. void *kbuf, void __user *ubuf)
  844. {
  845. if (count > 0) {
  846. if (kbuf) {
  847. unsigned long *k = kbuf;
  848. *k = task_thread_info(target)->last_break;
  849. } else {
  850. unsigned long __user *u = ubuf;
  851. if (__put_user(task_thread_info(target)->last_break, u))
  852. return -EFAULT;
  853. }
  854. }
  855. return 0;
  856. }
/*
 * last_break is report-only: writes are accepted and silently ignored
 * so that setting a full register dump does not fail on this regset.
 */
static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}
  864. static int s390_tdb_get(struct task_struct *target,
  865. const struct user_regset *regset,
  866. unsigned int pos, unsigned int count,
  867. void *kbuf, void __user *ubuf)
  868. {
  869. struct pt_regs *regs = task_pt_regs(target);
  870. unsigned char *data;
  871. if (!(regs->int_code & 0x200))
  872. return -ENODATA;
  873. data = target->thread.trap_tdb;
  874. return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
  875. }
/*
 * The TDB is report-only: writes are accepted and silently ignored so
 * that setting a full register dump does not fail on this regset.
 */
static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}
  883. #endif
  884. static int s390_system_call_get(struct task_struct *target,
  885. const struct user_regset *regset,
  886. unsigned int pos, unsigned int count,
  887. void *kbuf, void __user *ubuf)
  888. {
  889. unsigned int *data = &task_thread_info(target)->system_call;
  890. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  891. data, 0, sizeof(unsigned int));
  892. }
  893. static int s390_system_call_set(struct task_struct *target,
  894. const struct user_regset *regset,
  895. unsigned int pos, unsigned int count,
  896. const void *kbuf, const void __user *ubuf)
  897. {
  898. unsigned int *data = &task_thread_info(target)->system_call;
  899. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  900. data, 0, sizeof(unsigned int));
  901. }
/* Regset table for native (64-bit) tasks; indices match the REGSET_* enum. */
static const struct user_regset s390_regsets[] = {
	/* General purpose registers, psw etc. (ELF NT_PRSTATUS note). */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	/* Floating point control + registers (ELF NT_PRFPREG note). */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
#ifdef CONFIG_64BIT
	/* Last-breaking-event address; a single word, read-only. */
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	/* 256-byte transaction diagnostic block, read-only. */
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
#endif
	/* Interrupted system call number from the thread info. */
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
};
/* Regset view presented to native (64-bit) tracers and core dumps. */
static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
  952. #ifdef CONFIG_COMPAT
  953. static int s390_compat_regs_get(struct task_struct *target,
  954. const struct user_regset *regset,
  955. unsigned int pos, unsigned int count,
  956. void *kbuf, void __user *ubuf)
  957. {
  958. if (target == current)
  959. save_access_regs(target->thread.acrs);
  960. if (kbuf) {
  961. compat_ulong_t *k = kbuf;
  962. while (count > 0) {
  963. *k++ = __peek_user_compat(target, pos);
  964. count -= sizeof(*k);
  965. pos += sizeof(*k);
  966. }
  967. } else {
  968. compat_ulong_t __user *u = ubuf;
  969. while (count > 0) {
  970. if (__put_user(__peek_user_compat(target, pos), u++))
  971. return -EFAULT;
  972. count -= sizeof(*u);
  973. pos += sizeof(*u);
  974. }
  975. }
  976. return 0;
  977. }
  978. static int s390_compat_regs_set(struct task_struct *target,
  979. const struct user_regset *regset,
  980. unsigned int pos, unsigned int count,
  981. const void *kbuf, const void __user *ubuf)
  982. {
  983. int rc = 0;
  984. if (target == current)
  985. save_access_regs(target->thread.acrs);
  986. if (kbuf) {
  987. const compat_ulong_t *k = kbuf;
  988. while (count > 0 && !rc) {
  989. rc = __poke_user_compat(target, pos, *k++);
  990. count -= sizeof(*k);
  991. pos += sizeof(*k);
  992. }
  993. } else {
  994. const compat_ulong_t __user *u = ubuf;
  995. while (count > 0 && !rc) {
  996. compat_ulong_t word;
  997. rc = __get_user(word, u++);
  998. if (rc)
  999. break;
  1000. rc = __poke_user_compat(target, pos, word);
  1001. count -= sizeof(*u);
  1002. pos += sizeof(*u);
  1003. }
  1004. }
  1005. if (rc == 0 && target == current)
  1006. restore_access_regs(target->thread.acrs);
  1007. return rc;
  1008. }
/*
 * Read the upper 32-bit halves of the 64-bit gprs for a compat task.
 * On this big-endian machine the high word is the first 4 bytes of each
 * 8-byte gpr, so casting &gprs[i] to compat_ulong_t * addresses the
 * high half directly; advancing the pointer by two compat words steps
 * to the high half of the next gpr.
 */
static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	/* pos is a byte offset into the packed array of high words. */
	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;	/* skip the low half of the gpr */
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;	/* skip the low half of the gpr */
			count -= sizeof(*u);
		}
	}
	return 0;
}
  1035. static int s390_compat_regs_high_set(struct task_struct *target,
  1036. const struct user_regset *regset,
  1037. unsigned int pos, unsigned int count,
  1038. const void *kbuf, const void __user *ubuf)
  1039. {
  1040. compat_ulong_t *gprs_high;
  1041. int rc = 0;
  1042. gprs_high = (compat_ulong_t *)
  1043. &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
  1044. if (kbuf) {
  1045. const compat_ulong_t *k = kbuf;
  1046. while (count > 0) {
  1047. *gprs_high = *k++;
  1048. *gprs_high += 2;
  1049. count -= sizeof(*k);
  1050. }
  1051. } else {
  1052. const compat_ulong_t __user *u = ubuf;
  1053. while (count > 0 && !rc) {
  1054. unsigned long word;
  1055. rc = __get_user(word, u++);
  1056. if (rc)
  1057. break;
  1058. *gprs_high = word;
  1059. *gprs_high += 2;
  1060. count -= sizeof(*u);
  1061. }
  1062. }
  1063. return rc;
  1064. }
  1065. static int s390_compat_last_break_get(struct task_struct *target,
  1066. const struct user_regset *regset,
  1067. unsigned int pos, unsigned int count,
  1068. void *kbuf, void __user *ubuf)
  1069. {
  1070. compat_ulong_t last_break;
  1071. if (count > 0) {
  1072. last_break = task_thread_info(target)->last_break;
  1073. if (kbuf) {
  1074. unsigned long *k = kbuf;
  1075. *k = last_break;
  1076. } else {
  1077. unsigned long __user *u = ubuf;
  1078. if (__put_user(last_break, u))
  1079. return -EFAULT;
  1080. }
  1081. }
  1082. return 0;
  1083. }
/*
 * last_break is report-only: writes are accepted and silently ignored
 * so that setting a full register dump does not fail on this regset.
 */
static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}
/* Regset table for compat (31-bit) tasks; indices match the REGSET_* enum. */
static const struct user_regset s390_compat_regsets[] = {
	/* General purpose registers in the 31-bit layout. */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	/* FP layout is identical for compat, so the native handlers serve. */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	/* Last-breaking-event address, truncated to 32 bit; read-only. */
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	/* 256-byte transaction diagnostic block, read-only. */
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	/* Interrupted system call number from the thread info. */
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	/* Upper halves of the 64-bit gprs, invisible in the 31-bit ABI. */
	[REGSET_GENERAL_EXTENDED] = {
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};
/* Regset view presented to 31-bit (compat) tracers and core dumps. */
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
  1147. #endif
/*
 * Select the regset view matching the task's ABI: the compat view for
 * 31-bit tasks (when compat support is built in), the native view
 * otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
/* Symbolic names of the general purpose registers, indexed by number. */
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
  1160. unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
  1161. {
  1162. if (offset >= NUM_GPRS)
  1163. return 0;
  1164. return regs->gprs[offset];
  1165. }
  1166. int regs_query_register_offset(const char *name)
  1167. {
  1168. unsigned long offset;
  1169. if (!name || *name != 'r')
  1170. return -EINVAL;
  1171. if (strict_strtoul(name + 1, 10, &offset))
  1172. return -EINVAL;
  1173. if (offset >= NUM_GPRS)
  1174. return -EINVAL;
  1175. return offset;
  1176. }
  1177. const char *regs_query_register_name(unsigned int offset)
  1178. {
  1179. if (offset >= NUM_GPRS)
  1180. return NULL;
  1181. return gpr_names[offset];
  1182. }
  1183. static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
  1184. {
  1185. unsigned long ksp = kernel_stack_pointer(regs);
  1186. return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
  1187. }
  1188. /**
  1189. * regs_get_kernel_stack_nth() - get Nth entry of the stack
  1190. * @regs:pt_regs which contains kernel stack pointer.
  1191. * @n:stack entry number.
  1192. *
  1193. * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
1194. is specified by @regs. If the @n th entry is NOT in the kernel stack,
  1195. * this returns 0.
  1196. */
  1197. unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
  1198. {
  1199. unsigned long addr;
  1200. addr = kernel_stack_pointer(regs) + n * sizeof(long);
  1201. if (!regs_within_kernel_stack(regs, addr))
  1202. return 0;
  1203. return *(unsigned long *)addr;
  1204. }