compat.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860
  1. /*
  2. * linux/kernel/compat.c
  3. *
  4. * Kernel compatibililty routines for e.g. 32 bit syscall support
  5. * on 64 bit kernels.
  6. *
  7. * Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #include <linux/linkage.h>
  14. #include <linux/compat.h>
  15. #include <linux/errno.h>
  16. #include <linux/time.h>
  17. #include <linux/signal.h>
  18. #include <linux/sched.h> /* for MAX_SCHEDULE_TIMEOUT */
  19. #include <linux/futex.h> /* for FUTEX_WAIT */
  20. #include <linux/syscalls.h>
  21. #include <linux/unistd.h>
  22. #include <linux/security.h>
  23. #include <asm/uaccess.h>
  24. #include <asm/bug.h>
  25. int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
  26. {
  27. return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
  28. __get_user(ts->tv_sec, &cts->tv_sec) ||
  29. __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
  30. }
  31. int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
  32. {
  33. return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
  34. __put_user(ts->tv_sec, &cts->tv_sec) ||
  35. __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
  36. }
/*
 * Restart handler for a compat nanosleep interrupted by a signal:
 * finish sleeping until the absolute expiry (jiffies) stashed in
 * restart->arg0, updating the user "remaining time" pointer stashed
 * in restart->arg1 if we are interrupted again.
 */
static long compat_nanosleep_restart(struct restart_block *restart)
{
	unsigned long expire = restart->arg0, now = jiffies;
	struct compat_timespec __user *rmtp;

	/* Did it expire while we handled signals? */
	if (!time_after(expire, now))
		return 0;

	current->state = TASK_INTERRUPTIBLE;
	expire = schedule_timeout(expire - now);
	if (expire == 0)
		return 0;

	/* Interrupted again: report the time still remaining, if asked. */
	rmtp = (struct compat_timespec __user *)restart->arg1;
	if (rmtp) {
		struct compat_timespec ct;
		struct timespec t;

		jiffies_to_timespec(expire, &t);
		ct.tv_sec = t.tv_sec;
		ct.tv_nsec = t.tv_nsec;
		if (copy_to_user(rmtp, &ct, sizeof(ct)))
			return -EFAULT;
	}
	/* The 'restart' block is already filled in */
	return -ERESTART_RESTARTBLOCK;
}
/*
 * 32-bit nanosleep(2): sleep for the requested relative time. If a
 * signal interrupts the sleep, write the remaining time to *rmtp and
 * arrange for compat_nanosleep_restart() to resume the sleep.
 */
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
		struct compat_timespec __user *rmtp)
{
	struct timespec t;
	struct restart_block *restart;
	unsigned long expire;

	if (get_compat_timespec(&t, rqtp))
		return -EFAULT;

	if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
		return -EINVAL;

	/* Add one jiffy for any non-zero request (round up, never short). */
	expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
	current->state = TASK_INTERRUPTIBLE;
	expire = schedule_timeout(expire);
	if (expire == 0)
		return 0;

	/* Non-zero return means we were woken early by a signal. */
	if (rmtp) {
		jiffies_to_timespec(expire, &t);
		if (put_compat_timespec(&t, rmtp))
			return -EFAULT;
	}
	/* Set up the restart block to resume at the absolute expiry. */
	restart = &current_thread_info()->restart_block;
	restart->fn = compat_nanosleep_restart;
	restart->arg0 = jiffies + expire;
	restart->arg1 = (unsigned long) rmtp;
	return -ERESTART_RESTARTBLOCK;
}
  87. static inline long get_compat_itimerval(struct itimerval *o,
  88. struct compat_itimerval __user *i)
  89. {
  90. return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
  91. (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
  92. __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
  93. __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
  94. __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
  95. }
  96. static inline long put_compat_itimerval(struct compat_itimerval __user *o,
  97. struct itimerval *i)
  98. {
  99. return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
  100. (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
  101. __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
  102. __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
  103. __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
  104. }
  105. asmlinkage long compat_sys_getitimer(int which,
  106. struct compat_itimerval __user *it)
  107. {
  108. struct itimerval kit;
  109. int error;
  110. error = do_getitimer(which, &kit);
  111. if (!error && put_compat_itimerval(it, &kit))
  112. error = -EFAULT;
  113. return error;
  114. }
  115. asmlinkage long compat_sys_setitimer(int which,
  116. struct compat_itimerval __user *in,
  117. struct compat_itimerval __user *out)
  118. {
  119. struct itimerval kin, kout;
  120. int error;
  121. if (in) {
  122. if (get_compat_itimerval(&kin, in))
  123. return -EFAULT;
  124. } else
  125. memset(&kin, 0, sizeof(kin));
  126. error = do_setitimer(which, &kin, out ? &kout : NULL);
  127. if (error || !out)
  128. return error;
  129. if (put_compat_itimerval(out, &kout))
  130. return -EFAULT;
  131. return 0;
  132. }
/*
 * 32-bit times(2): fill a compat_tms with the process (and reaped
 * children) CPU times and return the current jiffies in compat
 * clock ticks.
 */
asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
	/*
	 * In the SMP world we might just be unlucky and have one of
	 * the times increment as we use it. Since the value is an
	 * atomically safe type this is just fine. Conceptually its
	 * as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct compat_tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		read_lock(&tasklist_lock);
		/* Start from the signal-struct totals, then add every live thread. */
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		/*
		 * While we have tasklist_lock read-locked, no dying thread
		 * can be updating current->signal->[us]time. Instead,
		 * we got their counts included in the live thread loop.
		 * However, another thread can come in right now and
		 * do a wait call that updates current->signal->c[us]time.
		 * To make sure we always see that pair updated atomically,
		 * we take the siglock around fetching them.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);

		/* Convert cputime -> jiffies -> the compat clock_t ABI. */
		tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
		tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
		tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
		tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	return compat_jiffies_to_clock_t(jiffies);
}
  178. /*
  179. * Assumption: old_sigset_t and compat_old_sigset_t are both
  180. * types that can be passed to put_user()/get_user().
  181. */
  182. asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
  183. {
  184. old_sigset_t s;
  185. long ret;
  186. mm_segment_t old_fs = get_fs();
  187. set_fs(KERNEL_DS);
  188. ret = sys_sigpending((old_sigset_t __user *) &s);
  189. set_fs(old_fs);
  190. if (ret == 0)
  191. ret = put_user(s, set);
  192. return ret;
  193. }
  194. asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
  195. compat_old_sigset_t __user *oset)
  196. {
  197. old_sigset_t s;
  198. long ret;
  199. mm_segment_t old_fs;
  200. if (set && get_user(s, set))
  201. return -EFAULT;
  202. old_fs = get_fs();
  203. set_fs(KERNEL_DS);
  204. ret = sys_sigprocmask(how,
  205. set ? (old_sigset_t __user *) &s : NULL,
  206. oset ? (old_sigset_t __user *) &s : NULL);
  207. set_fs(old_fs);
  208. if (ret == 0)
  209. if (oset)
  210. ret = put_user(s, oset);
  211. return ret;
  212. }
  213. #ifdef CONFIG_FUTEX
  214. asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, int val,
  215. struct compat_timespec __user *utime, u32 __user *uaddr2,
  216. int val3)
  217. {
  218. struct timespec t;
  219. unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
  220. int val2 = 0;
  221. if ((op == FUTEX_WAIT) && utime) {
  222. if (get_compat_timespec(&t, utime))
  223. return -EFAULT;
  224. timeout = timespec_to_jiffies(&t) + 1;
  225. }
  226. if (op >= FUTEX_REQUEUE)
  227. val2 = (int) (unsigned long) utime;
  228. return do_futex((unsigned long)uaddr, op, val, timeout,
  229. (unsigned long)uaddr2, val2, val3);
  230. }
  231. #endif
  232. asmlinkage long compat_sys_setrlimit(unsigned int resource,
  233. struct compat_rlimit __user *rlim)
  234. {
  235. struct rlimit r;
  236. int ret;
  237. mm_segment_t old_fs = get_fs ();
  238. if (resource >= RLIM_NLIMITS)
  239. return -EINVAL;
  240. if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
  241. __get_user(r.rlim_cur, &rlim->rlim_cur) ||
  242. __get_user(r.rlim_max, &rlim->rlim_max))
  243. return -EFAULT;
  244. if (r.rlim_cur == COMPAT_RLIM_INFINITY)
  245. r.rlim_cur = RLIM_INFINITY;
  246. if (r.rlim_max == COMPAT_RLIM_INFINITY)
  247. r.rlim_max = RLIM_INFINITY;
  248. set_fs(KERNEL_DS);
  249. ret = sys_setrlimit(resource, (struct rlimit __user *) &r);
  250. set_fs(old_fs);
  251. return ret;
  252. }
  253. #ifdef COMPAT_RLIM_OLD_INFINITY
  254. asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
  255. struct compat_rlimit __user *rlim)
  256. {
  257. struct rlimit r;
  258. int ret;
  259. mm_segment_t old_fs = get_fs();
  260. set_fs(KERNEL_DS);
  261. ret = sys_old_getrlimit(resource, &r);
  262. set_fs(old_fs);
  263. if (!ret) {
  264. if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
  265. r.rlim_cur = COMPAT_RLIM_INFINITY;
  266. if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
  267. r.rlim_max = COMPAT_RLIM_INFINITY;
  268. if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
  269. __put_user(r.rlim_cur, &rlim->rlim_cur) ||
  270. __put_user(r.rlim_max, &rlim->rlim_max))
  271. return -EFAULT;
  272. }
  273. return ret;
  274. }
  275. #endif
  276. asmlinkage long compat_sys_getrlimit (unsigned int resource,
  277. struct compat_rlimit __user *rlim)
  278. {
  279. struct rlimit r;
  280. int ret;
  281. mm_segment_t old_fs = get_fs();
  282. set_fs(KERNEL_DS);
  283. ret = sys_getrlimit(resource, (struct rlimit __user *) &r);
  284. set_fs(old_fs);
  285. if (!ret) {
  286. if (r.rlim_cur > COMPAT_RLIM_INFINITY)
  287. r.rlim_cur = COMPAT_RLIM_INFINITY;
  288. if (r.rlim_max > COMPAT_RLIM_INFINITY)
  289. r.rlim_max = COMPAT_RLIM_INFINITY;
  290. if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
  291. __put_user(r.rlim_cur, &rlim->rlim_cur) ||
  292. __put_user(r.rlim_max, &rlim->rlim_max))
  293. return -EFAULT;
  294. }
  295. return ret;
  296. }
/*
 * Copy a native struct rusage out to user space in the compat
 * layout, field by field. Returns 0 on success or -EFAULT.
 */
int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
	if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
	    __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
	    __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
	    __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
	    __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
	    __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
	    __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
	    __put_user(r->ru_idrss, &ru->ru_idrss) ||
	    __put_user(r->ru_isrss, &ru->ru_isrss) ||
	    __put_user(r->ru_minflt, &ru->ru_minflt) ||
	    __put_user(r->ru_majflt, &ru->ru_majflt) ||
	    __put_user(r->ru_nswap, &ru->ru_nswap) ||
	    __put_user(r->ru_inblock, &ru->ru_inblock) ||
	    __put_user(r->ru_oublock, &ru->ru_oublock) ||
	    __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
	    __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
	    __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
	    __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
	    __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
		return -EFAULT;
	return 0;
}
  321. asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
  322. {
  323. struct rusage r;
  324. int ret;
  325. mm_segment_t old_fs = get_fs();
  326. set_fs(KERNEL_DS);
  327. ret = sys_getrusage(who, (struct rusage __user *) &r);
  328. set_fs(old_fs);
  329. if (ret)
  330. return ret;
  331. if (put_compat_rusage(&r, ru))
  332. return -EFAULT;
  333. return 0;
  334. }
  335. asmlinkage long
  336. compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
  337. struct compat_rusage __user *ru)
  338. {
  339. if (!ru) {
  340. return sys_wait4(pid, stat_addr, options, NULL);
  341. } else {
  342. struct rusage r;
  343. int ret;
  344. unsigned int status;
  345. mm_segment_t old_fs = get_fs();
  346. set_fs (KERNEL_DS);
  347. ret = sys_wait4(pid,
  348. (stat_addr ?
  349. (unsigned int __user *) &status : NULL),
  350. options, (struct rusage __user *) &r);
  351. set_fs (old_fs);
  352. if (ret > 0) {
  353. if (put_compat_rusage(&r, ru))
  354. return -EFAULT;
  355. if (stat_addr && put_user(status, stat_addr))
  356. return -EFAULT;
  357. }
  358. return ret;
  359. }
  360. }
/*
 * 32-bit waitid(2): run the native syscall against kernel buffers,
 * then convert the rusage and siginfo results to compat layouts.
 */
asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
		struct compat_siginfo __user *uinfo, int options,
		struct compat_rusage __user *uru)
{
	siginfo_t info;
	struct rusage ru;
	long ret;
	mm_segment_t old_fs = get_fs();

	/* Zero si_signo so we can detect "no child state" below. */
	memset(&info, 0, sizeof(info));

	set_fs(KERNEL_DS);
	ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
			 uru ? (struct rusage __user *)&ru : NULL);
	set_fs(old_fs);

	if ((ret < 0) || (info.si_signo == 0))
		return ret;

	if (uru) {
		ret = put_compat_rusage(&ru, uru);
		if (ret)
			return ret;
	}

	/*
	 * si_code must come back with no class bits set; mark it as a
	 * child-status code before the 32-bit siginfo copy-out.
	 */
	BUG_ON(info.si_code & __SI_MASK);
	info.si_code |= __SI_CHLD;
	return copy_siginfo_to_user32(uinfo, &info);
}
/*
 * Fetch a CPU affinity mask of 'len' bytes from user space into a
 * native cpumask_t. A short buffer leaves the upper part zeroed; a
 * long one is truncated to sizeof(cpumask_t) before the bitmap copy.
 */
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
		unsigned len, cpumask_t *new_mask)
{
	unsigned long *k;

	if (len < sizeof(cpumask_t))
		memset(new_mask, 0, sizeof(cpumask_t));
	else if (len > sizeof(cpumask_t))
		len = sizeof(cpumask_t);

	/* View the cpumask as the array of unsigned longs it stores. */
	k = cpus_addr(*new_mask);
	return compat_get_bitmap(k, user_mask_ptr, len * 8);
}
  396. asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
  397. unsigned int len,
  398. compat_ulong_t __user *user_mask_ptr)
  399. {
  400. cpumask_t new_mask;
  401. int retval;
  402. retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask);
  403. if (retval)
  404. return retval;
  405. return sched_setaffinity(pid, new_mask);
  406. }
/*
 * 32-bit sched_getaffinity(2): copy the task's CPU mask out as a
 * compat bitmap. Returns the number of bytes written on success.
 */
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
		compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_t mask;
	unsigned long *k;
	unsigned int min_length = sizeof(cpumask_t);

	/* A single compat word suffices when NR_CPUS fits in it. */
	if (NR_CPUS <= BITS_PER_COMPAT_LONG)
		min_length = sizeof(compat_ulong_t);

	/* The user buffer must hold at least the full mask. */
	if (len < min_length)
		return -EINVAL;

	ret = sched_getaffinity(pid, &mask);
	if (ret < 0)
		return ret;

	k = cpus_addr(mask);
	ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
	if (ret)
		return ret;

	return min_length;
}
  427. static int get_compat_itimerspec(struct itimerspec *dst,
  428. struct compat_itimerspec __user *src)
  429. {
  430. if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
  431. get_compat_timespec(&dst->it_value, &src->it_value))
  432. return -EFAULT;
  433. return 0;
  434. }
  435. static int put_compat_itimerspec(struct compat_itimerspec __user *dst,
  436. struct itimerspec *src)
  437. {
  438. if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
  439. put_compat_timespec(&src->it_value, &dst->it_value))
  440. return -EFAULT;
  441. return 0;
  442. }
  443. long compat_sys_timer_settime(timer_t timer_id, int flags,
  444. struct compat_itimerspec __user *new,
  445. struct compat_itimerspec __user *old)
  446. {
  447. long err;
  448. mm_segment_t oldfs;
  449. struct itimerspec newts, oldts;
  450. if (!new)
  451. return -EINVAL;
  452. if (get_compat_itimerspec(&newts, new))
  453. return -EFAULT;
  454. oldfs = get_fs();
  455. set_fs(KERNEL_DS);
  456. err = sys_timer_settime(timer_id, flags,
  457. (struct itimerspec __user *) &newts,
  458. (struct itimerspec __user *) &oldts);
  459. set_fs(oldfs);
  460. if (!err && old && put_compat_itimerspec(old, &oldts))
  461. return -EFAULT;
  462. return err;
  463. }
  464. long compat_sys_timer_gettime(timer_t timer_id,
  465. struct compat_itimerspec __user *setting)
  466. {
  467. long err;
  468. mm_segment_t oldfs;
  469. struct itimerspec ts;
  470. oldfs = get_fs();
  471. set_fs(KERNEL_DS);
  472. err = sys_timer_gettime(timer_id,
  473. (struct itimerspec __user *) &ts);
  474. set_fs(oldfs);
  475. if (!err && put_compat_itimerspec(setting, &ts))
  476. return -EFAULT;
  477. return err;
  478. }
  479. long compat_sys_clock_settime(clockid_t which_clock,
  480. struct compat_timespec __user *tp)
  481. {
  482. long err;
  483. mm_segment_t oldfs;
  484. struct timespec ts;
  485. if (get_compat_timespec(&ts, tp))
  486. return -EFAULT;
  487. oldfs = get_fs();
  488. set_fs(KERNEL_DS);
  489. err = sys_clock_settime(which_clock,
  490. (struct timespec __user *) &ts);
  491. set_fs(oldfs);
  492. return err;
  493. }
  494. long compat_sys_clock_gettime(clockid_t which_clock,
  495. struct compat_timespec __user *tp)
  496. {
  497. long err;
  498. mm_segment_t oldfs;
  499. struct timespec ts;
  500. oldfs = get_fs();
  501. set_fs(KERNEL_DS);
  502. err = sys_clock_gettime(which_clock,
  503. (struct timespec __user *) &ts);
  504. set_fs(oldfs);
  505. if (!err && put_compat_timespec(&ts, tp))
  506. return -EFAULT;
  507. return err;
  508. }
  509. long compat_sys_clock_getres(clockid_t which_clock,
  510. struct compat_timespec __user *tp)
  511. {
  512. long err;
  513. mm_segment_t oldfs;
  514. struct timespec ts;
  515. oldfs = get_fs();
  516. set_fs(KERNEL_DS);
  517. err = sys_clock_getres(which_clock,
  518. (struct timespec __user *) &ts);
  519. set_fs(oldfs);
  520. if (!err && tp && put_compat_timespec(&ts, tp))
  521. return -EFAULT;
  522. return err;
  523. }
  524. long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
  525. struct compat_timespec __user *rqtp,
  526. struct compat_timespec __user *rmtp)
  527. {
  528. long err;
  529. mm_segment_t oldfs;
  530. struct timespec in, out;
  531. if (get_compat_timespec(&in, rqtp))
  532. return -EFAULT;
  533. oldfs = get_fs();
  534. set_fs(KERNEL_DS);
  535. err = sys_clock_nanosleep(which_clock, flags,
  536. (struct timespec __user *) &in,
  537. (struct timespec __user *) &out);
  538. set_fs(oldfs);
  539. if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
  540. put_compat_timespec(&out, rmtp))
  541. return -EFAULT;
  542. return err;
  543. }
  544. /*
  545. * We currently only need the following fields from the sigevent
  546. * structure: sigev_value, sigev_signo, sig_notify and (sometimes
  547. * sigev_notify_thread_id). The others are handled in user mode.
  548. * We also assume that copying sigev_value.sival_int is sufficient
  549. * to keep all the bits of sigev_value.sival_ptr intact.
  550. */
  551. int get_compat_sigevent(struct sigevent *event,
  552. const struct compat_sigevent __user *u_event)
  553. {
  554. memset(event, 0, sizeof(*event));
  555. return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
  556. __get_user(event->sigev_value.sival_int,
  557. &u_event->sigev_value.sival_int) ||
  558. __get_user(event->sigev_signo, &u_event->sigev_signo) ||
  559. __get_user(event->sigev_notify, &u_event->sigev_notify) ||
  560. __get_user(event->sigev_notify_thread_id,
  561. &u_event->sigev_notify_thread_id))
  562. ? -EFAULT : 0;
  563. }
  564. /* timer_create is architecture specific because it needs sigevent conversion */
/*
 * Read a bitmap laid out as 32-bit compat words in user space into a
 * kernel bitmap of native unsigned longs. 'bitmap_size' is in bits.
 * Returns 0 on success or -EFAULT.
 */
long compat_get_bitmap(unsigned long *mask, compat_ulong_t __user *umask,
		unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = 0;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			/*
			 * We dont want to read past the end of the userspace
			 * bitmap. We must however ensure the end of the
			 * kernel bitmap is zeroed.
			 */
			if (nr_compat_longs-- > 0) {
				if (__get_user(um, umask))
					return -EFAULT;
			} else {
				um = 0;
			}

			umask++;
			/* Pack each compat word into its slice of the native word. */
			m |= (long)um << (j * BITS_PER_COMPAT_LONG);
		}
		*mask++ = m;
	}

	return 0;
}
/*
 * Write a kernel bitmap of native unsigned longs out to user space as
 * 32-bit compat words. 'bitmap_size' is in bits. Returns 0 or -EFAULT.
 */
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
		unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = *mask++;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			/* Truncation to compat_ulong_t takes the low word. */
			um = m;

			/*
			 * We dont want to write past the end of the userspace
			 * bitmap.
			 */
			if (nr_compat_longs-- > 0) {
				if (__put_user(um, umask))
					return -EFAULT;
			}

			umask++;
			/*
			 * Advance by BITS_PER_COMPAT_LONG in two half-width
			 * shifts — presumably split so a single shift never
			 * equals the full width of 'm' (which would be
			 * undefined). NOTE(review): confirm intent.
			 */
			m >>= 4*sizeof(um);
			m >>= 4*sizeof(um);
		}
	}

	return 0;
}
/*
 * Convert a compat (32-bit word) sigset into the native layout.
 * Each native sigset word is assembled from two adjacent compat
 * words; the switch cases deliberately fall through from the
 * highest word present down to sig[0].
 */
void
sigset_from_compat (sigset_t *set, compat_sigset_t *compat)
{
	switch (_NSIG_WORDS) {
#if defined (__COMPAT_ENDIAN_SWAP__)
	/* Word pairs arrive swapped: the odd index holds the low half. */
	case 4: set->sig[3] = compat->sig[7] | (((long)compat->sig[6]) << 32 );
	case 3: set->sig[2] = compat->sig[5] | (((long)compat->sig[4]) << 32 );
	case 2: set->sig[1] = compat->sig[3] | (((long)compat->sig[2]) << 32 );
	case 1: set->sig[0] = compat->sig[1] | (((long)compat->sig[0]) << 32 );
#else
	case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 );
	case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 );
	case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 );
	case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 );
#endif
	}
}
/*
 * 32-bit rt_sigtimedwait(2): wait for one of the signals in *uthese,
 * optionally bounded by the timeout in *uts, reporting the result in
 * the compat siginfo layout.
 */
asmlinkage long
compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
		struct compat_siginfo __user *uinfo,
		struct compat_timespec __user *uts, compat_size_t sigsetsize)
{
	compat_sigset_t s32;
	sigset_t s;
	int sig;
	struct timespec t;
	siginfo_t info;
	long ret, timeout = 0;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&s, &s32);
	/*
	 * Drop SIGKILL/SIGSTOP from the waited set, then invert it
	 * (NOTE(review): dequeue_signal() appears to take the mask in
	 * complemented form — confirm against its definition).
	 */
	sigdelsetmask(&s,sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&s);

	if (uts) {
		if (get_compat_timespec (&t, uts))
			return -EFAULT;
		if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0
				|| t.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &s, &info);
	if (!sig) {
		/* Nothing pending: compute how long we may sleep. */
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = timespec_to_jiffies(&t)
				+(t.tv_sec || t.tv_nsec);
		if (timeout) {
			/*
			 * Temporarily narrow the blocked set to the waited
			 * signals, sleep, retry the dequeue, then restore
			 * the original blocked mask.
			 */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &s);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &s, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user32(uinfo, &info))
				ret = -EFAULT;
		}
	}else {
		/* No signal: -EINTR if we actually slept, else -EAGAIN. */
		ret = timeout?-EINTR:-EAGAIN;
	}
	return ret;
}
  704. #ifdef __ARCH_WANT_COMPAT_SYS_TIME
  705. /* compat_time_t is a 32 bit "long" and needs to get converted. */
  706. asmlinkage long compat_sys_time(compat_time_t __user * tloc)
  707. {
  708. compat_time_t i;
  709. struct timeval tv;
  710. do_gettimeofday(&tv);
  711. i = tv.tv_sec;
  712. if (tloc) {
  713. if (put_user(i,tloc))
  714. i = -EFAULT;
  715. }
  716. return i;
  717. }
  718. asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
  719. {
  720. struct timespec tv;
  721. int err;
  722. if (get_user(tv.tv_sec, tptr))
  723. return -EFAULT;
  724. tv.tv_nsec = 0;
  725. err = security_settime(&tv, NULL);
  726. if (err)
  727. return err;
  728. do_settimeofday(&tv);
  729. return 0;
  730. }
  731. #endif /* __ARCH_WANT_COMPAT_SYS_TIME */