compat.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084
  1. /*
  2. * linux/kernel/compat.c
  3. *
  4. * Kernel compatibililty routines for e.g. 32 bit syscall support
  5. * on 64 bit kernels.
  6. *
  7. * Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #include <linux/linkage.h>
  14. #include <linux/compat.h>
  15. #include <linux/errno.h>
  16. #include <linux/time.h>
  17. #include <linux/signal.h>
  18. #include <linux/sched.h> /* for MAX_SCHEDULE_TIMEOUT */
  19. #include <linux/syscalls.h>
  20. #include <linux/unistd.h>
  21. #include <linux/security.h>
  22. #include <linux/timex.h>
  23. #include <linux/migrate.h>
  24. #include <linux/posix-timers.h>
  25. #include <asm/uaccess.h>
  26. int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
  27. {
  28. return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
  29. __get_user(ts->tv_sec, &cts->tv_sec) ||
  30. __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
  31. }
  32. int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
  33. {
  34. return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
  35. __put_user(ts->tv_sec, &cts->tv_sec) ||
  36. __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
  37. }
/*
 * Restart handler for a compat nanosleep() interrupted by a signal.
 * restart->arg1 holds the user's 32-bit rmtp pointer; it is swapped
 * for a kernel-side timespec so the native hrtimer restart helper can
 * write the remaining time under KERNEL_DS, then the result is
 * converted back out to the compat user buffer.
 */
static long compat_nanosleep_restart(struct restart_block *restart)
{
	struct compat_timespec __user *rmtp;
	struct timespec rmt;
	mm_segment_t oldfs;
	long ret;

	/* Recover the user's rmtp pointer stashed at sleep time. */
	rmtp = (struct compat_timespec __user *)(restart->arg1);
	restart->arg1 = (unsigned long)&rmt;
	oldfs = get_fs();
	set_fs(KERNEL_DS);	/* let the helper "copy_to_user" into &rmt */
	ret = hrtimer_nanosleep_restart(restart);
	set_fs(oldfs);

	if (ret) {
		/* Re-arm arg1 with the user pointer for a further restart. */
		restart->arg1 = (unsigned long)rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}
/*
 * 32-bit nanosleep(2): convert the compat request timespec, sleep via
 * hrtimer_nanosleep() with a kernel-side remaining-time buffer (under
 * KERNEL_DS), and on interruption set up compat_nanosleep_restart and
 * copy the remaining time back to the user's compat rmtp.
 */
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
		struct compat_timespec __user *rmtp)
{
	struct timespec tu, rmt;
	mm_segment_t oldfs;
	long ret;

	if (get_compat_timespec(&tu, rqtp))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	oldfs = get_fs();
	set_fs(KERNEL_DS);	/* rmt is a kernel buffer, not a user one */
	ret = hrtimer_nanosleep(&tu,
				rmtp ? (struct timespec __user *)&rmt : NULL,
				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
	set_fs(oldfs);

	if (ret) {
		/* Interrupted: arrange for a compat-aware restart. */
		struct restart_block *restart
			= &current_thread_info()->restart_block;

		restart->fn = compat_nanosleep_restart;
		restart->arg1 = (unsigned long)rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}
/*
 * Fetch a compat itimerval from user space into a native itimerval.
 * Note the bitwise '|' between the __get_user() calls: all four fields
 * are fetched unconditionally and any fault makes the (nonzero) result
 * truthy.  Callers only test for nonzero, not for -EFAULT.
 */
static inline long get_compat_itimerval(struct itimerval *o,
		struct compat_itimerval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
		 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
		 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
		 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}
/*
 * Store a native itimerval out to a compat itimerval in user space.
 * As with get_compat_itimerval(), bitwise '|' stores all four fields
 * unconditionally; callers treat any nonzero return as a fault.
 */
static inline long put_compat_itimerval(struct compat_itimerval __user *o,
		struct itimerval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
		 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
		 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
		 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}
  101. asmlinkage long compat_sys_getitimer(int which,
  102. struct compat_itimerval __user *it)
  103. {
  104. struct itimerval kit;
  105. int error;
  106. error = do_getitimer(which, &kit);
  107. if (!error && put_compat_itimerval(it, &kit))
  108. error = -EFAULT;
  109. return error;
  110. }
  111. asmlinkage long compat_sys_setitimer(int which,
  112. struct compat_itimerval __user *in,
  113. struct compat_itimerval __user *out)
  114. {
  115. struct itimerval kin, kout;
  116. int error;
  117. if (in) {
  118. if (get_compat_itimerval(&kin, in))
  119. return -EFAULT;
  120. } else
  121. memset(&kin, 0, sizeof(kin));
  122. error = do_setitimer(which, &kin, out ? &kout : NULL);
  123. if (error || !out)
  124. return error;
  125. if (put_compat_itimerval(out, &kout))
  126. return -EFAULT;
  127. return 0;
  128. }
/*
 * 32-bit times(2): sum per-thread and signal-struct CPU times under
 * tasklist_lock, convert to compat clock ticks and copy them out.
 * Returns the current jiffies count in compat clock_t units.
 */
asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
	/*
	 * In the SMP world we might just be unlucky and have one of
	 * the times increment as we use it. Since the value is an
	 * atomically safe type this is just fine. Conceptually its
	 * as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct compat_tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		read_lock(&tasklist_lock);
		/* Start from the times already folded into the signal struct
		 * by exited threads, then add each live thread's share. */
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		/*
		 * While we have tasklist_lock read-locked, no dying thread
		 * can be updating current->signal->[us]time. Instead,
		 * we got their counts included in the live thread loop.
		 * However, another thread can come in right now and
		 * do a wait call that updates current->signal->c[us]time.
		 * To make sure we always see that pair updated atomically,
		 * we take the siglock around fetching them.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		read_unlock(&tasklist_lock);

		tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
		tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
		tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
		tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	return compat_jiffies_to_clock_t(jiffies);
}
  174. /*
  175. * Assumption: old_sigset_t and compat_old_sigset_t are both
  176. * types that can be passed to put_user()/get_user().
  177. */
/*
 * 32-bit sigpending(2): call the native syscall with a kernel-side
 * old_sigset_t under KERNEL_DS, then put_user() the result to the
 * compat pointer (old_sigset_t and compat_old_sigset_t are both
 * put_user()/get_user()-able, per the comment above).
 */
asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sigpending((old_sigset_t __user *) &s);
	set_fs(old_fs);
	if (ret == 0)
		ret = put_user(s, set);
	return ret;
}
/*
 * 32-bit sigprocmask(2).  A single kernel buffer 's' serves as both
 * the new-set input and the old-set output: sys_sigprocmask() reads
 * the new set before it writes the old one, so the reuse is safe.
 */
asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
		compat_old_sigset_t __user *oset)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs;

	if (set && get_user(s, set))
		return -EFAULT;
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_sigprocmask(how,
			      set ? (old_sigset_t __user *) &s : NULL,
			      oset ? (old_sigset_t __user *) &s : NULL);
	set_fs(old_fs);
	if (ret == 0)
		if (oset)
			ret = put_user(s, oset);
	return ret;
}
/*
 * 32-bit setrlimit(2): widen the compat rlimit, translating the
 * 32-bit COMPAT_RLIM_INFINITY sentinel to the native RLIM_INFINITY,
 * and forward to sys_setrlimit() under KERNEL_DS.
 */
asmlinkage long compat_sys_setrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs ();

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
	    __get_user(r.rlim_cur, &rlim->rlim_cur) ||
	    __get_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;

	/* Map the 32-bit "infinity" sentinel to the 64-bit one. */
	if (r.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	if (r.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	set_fs(KERNEL_DS);
	ret = sys_setrlimit(resource, (struct rlimit __user *) &r);
	set_fs(old_fs);
	return ret;
}
  230. #ifdef COMPAT_RLIM_OLD_INFINITY
/*
 * 32-bit legacy getrlimit(2) (the "old" ABI with a smaller infinity
 * value): fetch the native limits under KERNEL_DS and clamp anything
 * above COMPAT_RLIM_OLD_INFINITY to the compat infinity sentinel.
 */
asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_old_getrlimit(resource, &r);
	set_fs(old_fs);

	if (!ret) {
		/* Anything unrepresentable in the old ABI becomes infinity. */
		if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}
  252. #endif
/*
 * 32-bit getrlimit(2): fetch the native limits under KERNEL_DS and
 * clamp values the 32-bit ABI cannot represent to the compat
 * infinity sentinel before copying them out.
 */
asmlinkage long compat_sys_getrlimit (unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getrlimit(resource, (struct rlimit __user *) &r);
	set_fs(old_fs);
	if (!ret) {
		/* Saturate values too large for a 32-bit rlimit. */
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}
/*
 * Copy a native struct rusage out to a compat_rusage in user space,
 * field by field.  Returns 0 on success, -EFAULT on any fault.
 */
int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
	if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
	    __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
	    __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
	    __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
	    __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
	    __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
	    __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
	    __put_user(r->ru_idrss, &ru->ru_idrss) ||
	    __put_user(r->ru_isrss, &ru->ru_isrss) ||
	    __put_user(r->ru_minflt, &ru->ru_minflt) ||
	    __put_user(r->ru_majflt, &ru->ru_majflt) ||
	    __put_user(r->ru_nswap, &ru->ru_nswap) ||
	    __put_user(r->ru_inblock, &ru->ru_inblock) ||
	    __put_user(r->ru_oublock, &ru->ru_oublock) ||
	    __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
	    __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
	    __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
	    __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
	    __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
		return -EFAULT;
	return 0;
}
  298. asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
  299. {
  300. struct rusage r;
  301. int ret;
  302. mm_segment_t old_fs = get_fs();
  303. set_fs(KERNEL_DS);
  304. ret = sys_getrusage(who, (struct rusage __user *) &r);
  305. set_fs(old_fs);
  306. if (ret)
  307. return ret;
  308. if (put_compat_rusage(&r, ru))
  309. return -EFAULT;
  310. return 0;
  311. }
/*
 * 32-bit wait4(2).  With no rusage pointer the native syscall can be
 * called directly (stat_addr is a plain unsigned int in both ABIs);
 * otherwise run it under KERNEL_DS into kernel buffers and convert
 * the rusage and status out on success (ret > 0 means a pid).
 */
asmlinkage long
compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
	struct compat_rusage __user *ru)
{
	if (!ru) {
		return sys_wait4(pid, stat_addr, options, NULL);
	} else {
		struct rusage r;
		int ret;
		unsigned int status;
		mm_segment_t old_fs = get_fs();

		set_fs (KERNEL_DS);
		ret = sys_wait4(pid,
				(stat_addr ?
				 (unsigned int __user *) &status : NULL),
				options, (struct rusage __user *) &r);
		set_fs (old_fs);

		if (ret > 0) {
			if (put_compat_rusage(&r, ru))
				return -EFAULT;
			if (stat_addr && put_user(status, stat_addr))
				return -EFAULT;
		}
		return ret;
	}
}
/*
 * 32-bit waitid(2): run the native syscall under KERNEL_DS into a
 * kernel siginfo/rusage, then convert both out.  si_signo == 0 means
 * nothing was waited for (WNOHANG), so there is nothing to copy.
 */
asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
		struct compat_siginfo __user *uinfo, int options,
		struct compat_rusage __user *uru)
{
	siginfo_t info;
	struct rusage ru;
	long ret;
	mm_segment_t old_fs = get_fs();

	memset(&info, 0, sizeof(info));

	set_fs(KERNEL_DS);
	ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
			 uru ? (struct rusage __user *)&ru : NULL);
	set_fs(old_fs);

	if ((ret < 0) || (info.si_signo == 0))
		return ret;

	if (uru) {
		ret = put_compat_rusage(&ru, uru);
		if (ret)
			return ret;
	}

	/* sys_waitid() strips the si_code class bits; restore SI_CHLD. */
	BUG_ON(info.si_code & __SI_MASK);
	info.si_code |= __SI_CHLD;
	return copy_siginfo_to_user32(uinfo, &info);
}
/*
 * Read a CPU affinity mask supplied as an array of compat_ulong_t.
 * A short user buffer leaves the tail of the kernel mask zeroed; an
 * oversized one is truncated to sizeof(cpumask_t).
 */
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
		unsigned len, cpumask_t *new_mask)
{
	unsigned long *k;

	if (len < sizeof(cpumask_t))
		memset(new_mask, 0, sizeof(cpumask_t));
	else if (len > sizeof(cpumask_t))
		len = sizeof(cpumask_t);

	k = cpus_addr(*new_mask);
	/* len is bytes; compat_get_bitmap() wants bits. */
	return compat_get_bitmap(k, user_mask_ptr, len * 8);
}
  373. asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
  374. unsigned int len,
  375. compat_ulong_t __user *user_mask_ptr)
  376. {
  377. cpumask_t new_mask;
  378. int retval;
  379. retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask);
  380. if (retval)
  381. return retval;
  382. return sched_setaffinity(pid, new_mask);
  383. }
/*
 * 32-bit sched_getaffinity(2): require the user buffer to hold at
 * least min_length bytes (one compat word when NR_CPUS fits in it,
 * else a full cpumask), write the mask out as compat words and
 * return the number of bytes written.
 */
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_t mask;
	unsigned long *k;
	unsigned int min_length = sizeof(cpumask_t);

	if (NR_CPUS <= BITS_PER_COMPAT_LONG)
		min_length = sizeof(compat_ulong_t);

	if (len < min_length)
		return -EINVAL;

	ret = sched_getaffinity(pid, &mask);
	if (ret < 0)
		return ret;

	k = cpus_addr(mask);
	ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
	if (ret)
		return ret;

	/* Success returns the size of the mask copied out, in bytes. */
	return min_length;
}
  404. int get_compat_itimerspec(struct itimerspec *dst,
  405. const struct compat_itimerspec __user *src)
  406. {
  407. if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
  408. get_compat_timespec(&dst->it_value, &src->it_value))
  409. return -EFAULT;
  410. return 0;
  411. }
  412. int put_compat_itimerspec(struct compat_itimerspec __user *dst,
  413. const struct itimerspec *src)
  414. {
  415. if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
  416. put_compat_timespec(&src->it_value, &dst->it_value))
  417. return -EFAULT;
  418. return 0;
  419. }
/*
 * 32-bit timer_create(2): widen the compat sigevent into a native one
 * staged in compat_alloc_user_space() (a scratch area on the user
 * stack), so sys_timer_create() can copy_from_user() it normally.
 */
long compat_sys_timer_create(clockid_t which_clock,
			struct compat_sigevent __user *timer_event_spec,
			timer_t __user *created_timer_id)
{
	struct sigevent __user *event = NULL;

	if (timer_event_spec) {
		struct sigevent kevent;

		event = compat_alloc_user_space(sizeof(*event));
		if (get_compat_sigevent(&kevent, timer_event_spec) ||
		    copy_to_user(event, &kevent, sizeof(*event)))
			return -EFAULT;
	}

	return sys_timer_create(which_clock, event, created_timer_id);
}
/*
 * 32-bit timer_settime(2): convert the compat itimerspec in, call the
 * native syscall with kernel buffers under KERNEL_DS, and convert the
 * old setting back out if the caller asked for it.
 */
long compat_sys_timer_settime(timer_t timer_id, int flags,
			  struct compat_itimerspec __user *new,
			  struct compat_itimerspec __user *old)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec newts, oldts;

	if (!new)
		return -EINVAL;
	if (get_compat_itimerspec(&newts, new))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_settime(timer_id, flags,
				(struct itimerspec __user *) &newts,
				(struct itimerspec __user *) &oldts);
	set_fs(oldfs);
	if (!err && old && put_compat_itimerspec(old, &oldts))
		return -EFAULT;
	return err;
}
/*
 * 32-bit timer_gettime(2): call the native syscall into a kernel
 * itimerspec under KERNEL_DS, then convert the setting out.
 */
long compat_sys_timer_gettime(timer_t timer_id,
		struct compat_itimerspec __user *setting)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_gettime(timer_id,
				(struct itimerspec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_itimerspec(setting, &ts))
		return -EFAULT;
	return err;
}
/*
 * 32-bit clock_settime(2): widen the compat timespec and forward to
 * the native syscall under KERNEL_DS.
 */
long compat_sys_clock_settime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	if (get_compat_timespec(&ts, tp))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_settime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	return err;
}
/*
 * 32-bit clock_gettime(2): call the native syscall into a kernel
 * timespec under KERNEL_DS, then narrow the result out to user space.
 */
long compat_sys_clock_gettime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_gettime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}
/*
 * 32-bit clock_getres(2).  Like clock_gettime above, but tp may be
 * NULL (the caller only probes whether the clock exists).
 */
long compat_sys_clock_getres(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_getres(which_clock,
			       (struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && tp && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}
  515. static long compat_clock_nanosleep_restart(struct restart_block *restart)
  516. {
  517. long err;
  518. mm_segment_t oldfs;
  519. struct timespec tu;
  520. struct compat_timespec *rmtp = (struct compat_timespec *)(restart->arg1);
  521. restart->arg1 = (unsigned long) &tu;
  522. oldfs = get_fs();
  523. set_fs(KERNEL_DS);
  524. err = clock_nanosleep_restart(restart);
  525. set_fs(oldfs);
  526. if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
  527. put_compat_timespec(&tu, rmtp))
  528. return -EFAULT;
  529. if (err == -ERESTART_RESTARTBLOCK) {
  530. restart->fn = compat_clock_nanosleep_restart;
  531. restart->arg1 = (unsigned long) rmtp;
  532. }
  533. return err;
  534. }
/*
 * 32-bit clock_nanosleep(2): widen the request timespec, run the
 * native syscall with kernel buffers under KERNEL_DS, copy the
 * remaining time out on interruption, and install the compat restart
 * handler when the sleep must be resumed.
 */
long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
			    struct compat_timespec __user *rqtp,
			    struct compat_timespec __user *rmtp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec in, out;
	struct restart_block *restart;

	if (get_compat_timespec(&in, rqtp))
		return -EFAULT;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_nanosleep(which_clock, flags,
				  (struct timespec __user *) &in,
				  (struct timespec __user *) &out);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&out, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		/* sys_clock_nanosleep() set restart->fn to the native
		 * handler; override it with the compat-aware one and
		 * stash the user's compat rmtp pointer. */
		restart = &current_thread_info()->restart_block;
		restart->fn = compat_clock_nanosleep_restart;
		restart->arg1 = (unsigned long) rmtp;
	}
	return err;
}
  561. /*
  562. * We currently only need the following fields from the sigevent
  563. * structure: sigev_value, sigev_signo, sig_notify and (sometimes
  564. * sigev_notify_thread_id). The others are handled in user mode.
  565. * We also assume that copying sigev_value.sival_int is sufficient
  566. * to keep all the bits of sigev_value.sival_ptr intact.
  567. */
/*
 * Widen a compat sigevent from user space.  The memset() first
 * guarantees that all fields and padding not copied below read as
 * zero (see the comment above: only sigev_value, sigev_signo,
 * sigev_notify and sigev_notify_thread_id are needed here).
 */
int get_compat_sigevent(struct sigevent *event,
		const struct compat_sigevent __user *u_event)
{
	memset(event, 0, sizeof(*event));
	return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
		__get_user(event->sigev_value.sival_int,
			&u_event->sigev_value.sival_int) ||
		__get_user(event->sigev_signo, &u_event->sigev_signo) ||
		__get_user(event->sigev_notify, &u_event->sigev_notify) ||
		__get_user(event->sigev_notify_thread_id,
			&u_event->sigev_notify_thread_id))
		? -EFAULT : 0;
}
/*
 * Read a bitmap laid out as compat_ulong_t words from user space into
 * an array of native unsigned longs.  Each native word is assembled
 * from sizeof(long)/sizeof(compat_long) compat words, low word first.
 */
long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = 0;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			/*
			 * We dont want to read past the end of the userspace
			 * bitmap. We must however ensure the end of the
			 * kernel bitmap is zeroed.
			 */
			if (nr_compat_longs-- > 0) {
				if (__get_user(um, umask))
					return -EFAULT;
			} else {
				um = 0;
			}

			umask++;
			/* Pack this compat word into its slot of m. */
			m |= (long)um << (j * BITS_PER_COMPAT_LONG);
		}
		*mask++ = m;
	}

	return 0;
}
/*
 * Write a native unsigned-long bitmap out to user space as an array
 * of compat_ulong_t words — the inverse of compat_get_bitmap().
 */
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = *mask++;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			/* um takes the low compat-word of m (truncating
			 * assignment). */
			um = m;

			/*
			 * We dont want to write past the end of the userspace
			 * bitmap.
			 */
			if (nr_compat_longs-- > 0) {
				if (__put_user(um, umask))
					return -EFAULT;
			}

			umask++;
			/* Shift out the word just written.  Done as two
			 * half-width shifts: a single shift by
			 * 8*sizeof(um) bits would be undefined when that
			 * equals the width of m. */
			m >>= 4*sizeof(um);
			m >>= 4*sizeof(um);
		}
	}

	return 0;
}
/*
 * Rebuild a native sigset_t from a compat_sigset_t by pairing 32-bit
 * compat words into 64-bit native words.  The switch deliberately
 * falls through so exactly _NSIG_WORDS native words are filled.
 */
void
sigset_from_compat (sigset_t *set, compat_sigset_t *compat)
{
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 );
		/* fallthrough */
	case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 );
		/* fallthrough */
	case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 );
		/* fallthrough */
	case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 );
	}
}
/*
 * 32-bit rt_sigtimedwait(2): convert the compat sigset and timeout,
 * then wait for one of the requested signals.  If none is pending the
 * requested set is blocked (saved in real_blocked) while we sleep, so
 * a signal from the set wakes us but is not delivered to a handler.
 */
asmlinkage long
compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
		struct compat_siginfo __user *uinfo,
		struct compat_timespec __user *uts, compat_size_t sigsetsize)
{
	compat_sigset_t s32;
	sigset_t s;
	int sig;
	struct timespec t;
	siginfo_t info;
	long ret, timeout = 0;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&s, &s32);
	/* SIGKILL/SIGSTOP can never be waited for. */
	sigdelsetmask(&s,sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&s);

	if (uts) {
		if (get_compat_timespec (&t, uts))
			return -EFAULT;
		if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0
				|| t.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &s, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			/* The "+ (sec||nsec)" rounds any nonzero timeout
			 * up by one jiffy so it cannot truncate to zero. */
			timeout = timespec_to_jiffies(&t)
				+(t.tv_sec || t.tv_nsec);
		if (timeout) {
			/* Block the waited-for set while sleeping, then
			 * restore the original mask afterwards. */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &s);

			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &s, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user32(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		/* timeout != 0 means the wait was interrupted early. */
		ret = timeout ? -EINTR : -EAGAIN;
	}
	return ret;
}
  712. #ifdef __ARCH_WANT_COMPAT_SYS_TIME
  713. /* compat_time_t is a 32 bit "long" and needs to get converted. */
  714. asmlinkage long compat_sys_time(compat_time_t __user * tloc)
  715. {
  716. compat_time_t i;
  717. struct timeval tv;
  718. do_gettimeofday(&tv);
  719. i = tv.tv_sec;
  720. if (tloc) {
  721. if (put_user(i,tloc))
  722. i = -EFAULT;
  723. }
  724. return i;
  725. }
/*
 * 32-bit stime(2): set the system time from a 32-bit seconds value,
 * after the security hook approves the change.
 */
asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
{
	struct timespec tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime(&tv, NULL);
	if (err)
		return err;

	do_settimeofday(&tv);
	return 0;
}
  739. #endif /* __ARCH_WANT_COMPAT_SYS_TIME */
  740. #ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
/*
 * 32-bit rt_sigsuspend(2): temporarily replace the blocked-signal
 * mask (original saved in saved_sigmask), sleep until a signal
 * arrives, and let TIF_RESTORE_SIGMASK restore the old mask on the
 * signal-delivery path.  Always returns -ERESTARTNOHAND.
 */
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize)
{
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	/* SIGKILL and SIGSTOP may never be blocked. */
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
  762. #endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */
/*
 * 32-bit adjtimex(2): widen every compat_timex field into a native
 * struct timex, run do_adjtimex(), and narrow all fields back out.
 * The structure is copied back even when do_adjtimex() fails, since
 * the native syscall also fills in current state on error returns.
 */
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
{
	struct timex txc;
	int ret;

	memset(&txc, 0, sizeof(struct timex));

	if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
			__get_user(txc.modes, &utp->modes) ||
			__get_user(txc.offset, &utp->offset) ||
			__get_user(txc.freq, &utp->freq) ||
			__get_user(txc.maxerror, &utp->maxerror) ||
			__get_user(txc.esterror, &utp->esterror) ||
			__get_user(txc.status, &utp->status) ||
			__get_user(txc.constant, &utp->constant) ||
			__get_user(txc.precision, &utp->precision) ||
			__get_user(txc.tolerance, &utp->tolerance) ||
			__get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
			__get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
			__get_user(txc.tick, &utp->tick) ||
			__get_user(txc.ppsfreq, &utp->ppsfreq) ||
			__get_user(txc.jitter, &utp->jitter) ||
			__get_user(txc.shift, &utp->shift) ||
			__get_user(txc.stabil, &utp->stabil) ||
			__get_user(txc.jitcnt, &utp->jitcnt) ||
			__get_user(txc.calcnt, &utp->calcnt) ||
			__get_user(txc.errcnt, &utp->errcnt) ||
			__get_user(txc.stbcnt, &utp->stbcnt))
		return -EFAULT;

	ret = do_adjtimex(&txc);

	if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
			__put_user(txc.modes, &utp->modes) ||
			__put_user(txc.offset, &utp->offset) ||
			__put_user(txc.freq, &utp->freq) ||
			__put_user(txc.maxerror, &utp->maxerror) ||
			__put_user(txc.esterror, &utp->esterror) ||
			__put_user(txc.status, &utp->status) ||
			__put_user(txc.constant, &utp->constant) ||
			__put_user(txc.precision, &utp->precision) ||
			__put_user(txc.tolerance, &utp->tolerance) ||
			__put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
			__put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
			__put_user(txc.tick, &utp->tick) ||
			__put_user(txc.ppsfreq, &utp->ppsfreq) ||
			__put_user(txc.jitter, &utp->jitter) ||
			__put_user(txc.shift, &utp->shift) ||
			__put_user(txc.stabil, &utp->stabil) ||
			__put_user(txc.jitcnt, &utp->jitcnt) ||
			__put_user(txc.calcnt, &utp->calcnt) ||
			__put_user(txc.errcnt, &utp->errcnt) ||
			__put_user(txc.stbcnt, &utp->stbcnt))
		ret = -EFAULT;

	return ret;
}
  815. #ifdef CONFIG_NUMA
  816. asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
  817. compat_uptr_t __user *pages32,
  818. const int __user *nodes,
  819. int __user *status,
  820. int flags)
  821. {
  822. const void __user * __user *pages;
  823. int i;
  824. pages = compat_alloc_user_space(nr_pages * sizeof(void *));
  825. for (i = 0; i < nr_pages; i++) {
  826. compat_uptr_t p;
  827. if (get_user(p, pages32 + i) ||
  828. put_user(compat_ptr(p), pages + i))
  829. return -EFAULT;
  830. }
  831. return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
  832. }
/*
 * 32-bit migrate_pages(2): decode each compat node bitmap into a
 * kernel nodemask, then re-stage it as a native-long bitmap in
 * compat_alloc_user_space() so sys_migrate_pages() can read it back
 * with its normal user-space accessors.  Old and new masks share one
 * scratch allocation when both are present.
 */
asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
			compat_ulong_t maxnode,
			const compat_ulong_t __user *old_nodes,
			const compat_ulong_t __user *new_nodes)
{
	unsigned long __user *old = NULL;
	unsigned long __user *new = NULL;
	nodemask_t tmp_mask;
	unsigned long nr_bits;
	unsigned long size;

	/* maxnode counts one past the highest node bit, hence the -1. */
	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
	if (old_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
			return -EFAULT;
		/* One allocation holds both masks when new_nodes is set. */
		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
		if (new_nodes)
			new = old + size / sizeof(unsigned long);
		if (copy_to_user(old, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	if (new_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
			return -EFAULT;
		if (new == NULL)
			new = compat_alloc_user_space(size);
		if (copy_to_user(new, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	return sys_migrate_pages(pid, nr_bits + 1, old, new);
}
  864. #endif
/*
 * 32-bit layout of struct sysinfo, mirroring the native structure
 * with all longs narrowed to 32 bits.  _f pads the struct to the
 * size the 32-bit ABI expects.
 */
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};
/*
 * 32-bit sysinfo(2): gather native statistics, rescale memory counts
 * that overflow 32 bits by growing mem_unit (up to PAGE_SIZE), then
 * copy the narrowed structure out field by field.
 */
asmlinkage long
compat_sys_sysinfo(struct compat_sysinfo __user *info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and scale
	 * down if needed
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user (s.uptime, &info->uptime) ||
	    __put_user (s.loads[0], &info->loads[0]) ||
	    __put_user (s.loads[1], &info->loads[1]) ||
	    __put_user (s.loads[2], &info->loads[2]) ||
	    __put_user (s.totalram, &info->totalram) ||
	    __put_user (s.freeram, &info->freeram) ||
	    __put_user (s.sharedram, &info->sharedram) ||
	    __put_user (s.bufferram, &info->bufferram) ||
	    __put_user (s.totalswap, &info->totalswap) ||
	    __put_user (s.freeswap, &info->freeswap) ||
	    __put_user (s.procs, &info->procs) ||
	    __put_user (s.totalhigh, &info->totalhigh) ||
	    __put_user (s.freehigh, &info->freehigh) ||
	    __put_user (s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}