sys_ia32.c

  1. /*
  2. * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
  3. *
  4. * Copyright (C) 2000 VA Linux Co
  5. * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
  6. * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
  7. * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  8. * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
  9. * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
  10. * David Mosberger-Tang <davidm@hpl.hp.com>
  11. * Copyright (C) 2004 Gordon Jin <gordon.jin@intel.com>
  12. *
  13. * These routines maintain argument size conversion between 32bit and 64bit
  14. * environment.
  15. */
  16. #include <linux/config.h>
  17. #include <linux/kernel.h>
  18. #include <linux/syscalls.h>
  19. #include <linux/sysctl.h>
  20. #include <linux/sched.h>
  21. #include <linux/fs.h>
  22. #include <linux/file.h>
  23. #include <linux/signal.h>
  24. #include <linux/resource.h>
  25. #include <linux/times.h>
  26. #include <linux/utsname.h>
  27. #include <linux/timex.h>
  28. #include <linux/smp.h>
  29. #include <linux/smp_lock.h>
  30. #include <linux/sem.h>
  31. #include <linux/msg.h>
  32. #include <linux/mm.h>
  33. #include <linux/shm.h>
  34. #include <linux/slab.h>
  35. #include <linux/uio.h>
  36. #include <linux/nfs_fs.h>
#include <linux/quota.h>
#include <linux/sunrpc/svc.h>
  40. #include <linux/nfsd/nfsd.h>
  41. #include <linux/nfsd/cache.h>
  42. #include <linux/nfsd/xdr.h>
  43. #include <linux/nfsd/syscall.h>
  44. #include <linux/poll.h>
  45. #include <linux/eventpoll.h>
  46. #include <linux/personality.h>
  47. #include <linux/ptrace.h>
  48. #include <linux/stat.h>
  49. #include <linux/ipc.h>
  50. #include <linux/capability.h>
  51. #include <linux/compat.h>
  52. #include <linux/vfs.h>
  53. #include <linux/mman.h>
  54. #include <linux/mutex.h>
  55. #include <asm/intrinsics.h>
  56. #include <asm/types.h>
  57. #include <asm/uaccess.h>
  58. #include <asm/unistd.h>
  59. #include "ia32priv.h"
  60. #include <net/scm.h>
  61. #include <net/sock.h>
  62. #define DEBUG 0
  63. #if DEBUG
  64. # define DBG(fmt...) printk(KERN_DEBUG fmt)
  65. #else
  66. # define DBG(fmt...)
  67. #endif
  68. #define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
  69. #define OFFSET4K(a) ((a) & 0xfff)
  70. #define PAGE_START(addr) ((addr) & PAGE_MASK)
  71. #define MINSIGSTKSZ_IA32 2048
  72. #define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
  73. #define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))
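/*
 * For illustration: OFFSET4K(0x12345) is 0x345, the offset within a 4KB IA-32
 * page, and ROUND_UP(0x12345, 4096) is 0x13000.  high2lowuid()/high2lowgid()
 * clamp IDs that do not fit into the 16-bit fields of the old interfaces to
 * the overflow value 65534.
 */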
/*
 * Anything that modifies or inspects ia32 user virtual memory must hold this mutex
 * while doing so.
 */
/* XXX make per-mm: */
static DEFINE_MUTEX(ia32_mmap_mutex);
  80. asmlinkage long
  81. sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
  82. struct pt_regs *regs)
  83. {
  84. long error;
  85. char *filename;
  86. unsigned long old_map_base, old_task_size, tssd;
  87. filename = getname(name);
  88. error = PTR_ERR(filename);
  89. if (IS_ERR(filename))
  90. return error;
  91. old_map_base = current->thread.map_base;
  92. old_task_size = current->thread.task_size;
  93. tssd = ia64_get_kr(IA64_KR_TSSD);
  94. /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
  95. current->thread.map_base = DEFAULT_MAP_BASE;
  96. current->thread.task_size = DEFAULT_TASK_SIZE;
  97. ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
  98. ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
  99. error = compat_do_execve(filename, argv, envp, regs);
  100. putname(filename);
  101. if (error < 0) {
  102. /* oops, execve failed, switch back to old values... */
  103. ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
  104. ia64_set_kr(IA64_KR_TSSD, tssd);
  105. current->thread.map_base = old_map_base;
  106. current->thread.task_size = old_task_size;
  107. }
  108. return error;
  109. }
  110. int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
  111. {
  112. int err;
  113. if ((u64) stat->size > MAX_NON_LFS ||
  114. !old_valid_dev(stat->dev) ||
  115. !old_valid_dev(stat->rdev))
  116. return -EOVERFLOW;
  117. if (clear_user(ubuf, sizeof(*ubuf)))
  118. return -EFAULT;
  119. err = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev);
  120. err |= __put_user(stat->ino, &ubuf->st_ino);
  121. err |= __put_user(stat->mode, &ubuf->st_mode);
  122. err |= __put_user(stat->nlink, &ubuf->st_nlink);
  123. err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid);
  124. err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
  125. err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev);
  126. err |= __put_user(stat->size, &ubuf->st_size);
  127. err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
  128. err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
  129. err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
  130. err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
  131. err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
  132. err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
  133. err |= __put_user(stat->blksize, &ubuf->st_blksize);
  134. err |= __put_user(stat->blocks, &ubuf->st_blocks);
  135. return err;
  136. }
  137. #if PAGE_SHIFT > IA32_PAGE_SHIFT
  138. static int
  139. get_page_prot (struct vm_area_struct *vma, unsigned long addr)
  140. {
  141. int prot = 0;
  142. if (!vma || vma->vm_start > addr)
  143. return 0;
  144. if (vma->vm_flags & VM_READ)
  145. prot |= PROT_READ;
  146. if (vma->vm_flags & VM_WRITE)
  147. prot |= PROT_WRITE;
  148. if (vma->vm_flags & VM_EXEC)
  149. prot |= PROT_EXEC;
  150. return prot;
  151. }
  152. /*
  153. * Map a subpage by creating an anonymous page that contains the union of the old page and
  154. * the subpage.
  155. */
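/*
 * Illustrative example, assuming 16KB IA-64 pages and 4KB IA-32 pages: an
 * mmap of [0x1000, 0x2000) covers only part of its enclosing IA-64 page, so
 * the old contents of that page are saved to a temporary page, the whole
 * IA-64 page is re-mapped anonymously with do_mmap(), and the saved head and
 * tail bytes are copied back around the new subpage afterwards.
 */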
  156. static unsigned long
  157. mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
  158. loff_t off)
  159. {
  160. void *page = NULL;
  161. struct inode *inode;
  162. unsigned long ret = 0;
  163. struct vm_area_struct *vma = find_vma(current->mm, start);
  164. int old_prot = get_page_prot(vma, start);
  165. DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
  166. file, start, end, prot, flags, off);
  167. /* Optimize the case where the old mmap and the new mmap are both anonymous */
  168. if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
  169. if (clear_user((void __user *) start, end - start)) {
  170. ret = -EFAULT;
  171. goto out;
  172. }
  173. goto skip_mmap;
  174. }
  175. page = (void *) get_zeroed_page(GFP_KERNEL);
  176. if (!page)
  177. return -ENOMEM;
  178. if (old_prot)
  179. copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE);
  180. down_write(&current->mm->mmap_sem);
  181. {
  182. ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
  183. flags | MAP_FIXED | MAP_ANONYMOUS, 0);
  184. }
  185. up_write(&current->mm->mmap_sem);
  186. if (IS_ERR((void *) ret))
  187. goto out;
  188. if (old_prot) {
  189. /* copy back the old page contents. */
  190. if (offset_in_page(start))
  191. copy_to_user((void __user *) PAGE_START(start), page,
  192. offset_in_page(start));
  193. if (offset_in_page(end))
  194. copy_to_user((void __user *) end, page + offset_in_page(end),
  195. PAGE_SIZE - offset_in_page(end));
  196. }
  197. if (!(flags & MAP_ANONYMOUS)) {
  198. /* read the file contents */
  199. inode = file->f_dentry->d_inode;
  200. if (!inode->i_fop || !file->f_op->read
  201. || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
  202. {
  203. ret = -EINVAL;
  204. goto out;
  205. }
  206. }
  207. skip_mmap:
  208. if (!(prot & PROT_WRITE))
  209. ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
  210. out:
  211. if (page)
  212. free_page((unsigned long) page);
  213. return ret;
  214. }
  215. /* SLAB cache for partial_page structures */
  216. kmem_cache_t *partial_page_cachep;
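/*
 * Each partial_page records, one bit per 4KB IA-32 page, which pieces of a
 * single native IA-64 page are currently mapped.  The entries are kept both
 * on a singly linked list (pp_head/next) and in an rbtree keyed by the page
 * base address for fast lookup; pp_hint caches the most recently used entry.
 */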
/*
 * Initialize a partial_page_list.
 * Returns NULL if the kmalloc() fails.
 */
  221. struct partial_page_list*
  222. ia32_init_pp_list(void)
  223. {
  224. struct partial_page_list *p;
  225. if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
  226. return p;
  227. p->pp_head = NULL;
  228. p->ppl_rb = RB_ROOT;
  229. p->pp_hint = NULL;
  230. atomic_set(&p->pp_count, 1);
  231. return p;
  232. }
/*
 * Search partial page list @ppl for the partial page starting at @start.
 * If it is found, return it.  Otherwise, return NULL and set @pprev,
 * @rb_link, and @rb_parent for use by a subsequent __ia32_insert_pp().
 */
  239. static struct partial_page *
  240. __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
  241. struct partial_page **pprev, struct rb_node ***rb_link,
  242. struct rb_node **rb_parent)
  243. {
  244. struct partial_page *pp;
  245. struct rb_node **__rb_link, *__rb_parent, *rb_prev;
  246. pp = ppl->pp_hint;
  247. if (pp && pp->base == start)
  248. return pp;
  249. __rb_link = &ppl->ppl_rb.rb_node;
  250. rb_prev = __rb_parent = NULL;
  251. while (*__rb_link) {
  252. __rb_parent = *__rb_link;
  253. pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
  254. if (pp->base == start) {
  255. ppl->pp_hint = pp;
  256. return pp;
  257. } else if (pp->base < start) {
  258. rb_prev = __rb_parent;
  259. __rb_link = &__rb_parent->rb_right;
  260. } else {
  261. __rb_link = &__rb_parent->rb_left;
  262. }
  263. }
  264. *rb_link = __rb_link;
  265. *rb_parent = __rb_parent;
  266. *pprev = NULL;
  267. if (rb_prev)
  268. *pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
  269. return NULL;
  270. }
  271. /*
  272. * insert @pp into @ppl.
  273. */
  274. static void
  275. __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
  276. struct partial_page *prev, struct rb_node **rb_link,
  277. struct rb_node *rb_parent)
  278. {
  279. /* link list */
  280. if (prev) {
  281. pp->next = prev->next;
  282. prev->next = pp;
  283. } else {
  284. ppl->pp_head = pp;
  285. if (rb_parent)
  286. pp->next = rb_entry(rb_parent,
  287. struct partial_page, pp_rb);
  288. else
  289. pp->next = NULL;
  290. }
  291. /* link rb */
  292. rb_link_node(&pp->pp_rb, rb_parent, rb_link);
  293. rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);
  294. ppl->pp_hint = pp;
  295. }
  296. /*
  297. * delete @pp from partial page list @ppl.
  298. */
  299. static void
  300. __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
  301. struct partial_page *prev)
  302. {
  303. if (prev) {
  304. prev->next = pp->next;
  305. if (ppl->pp_hint == pp)
  306. ppl->pp_hint = prev;
  307. } else {
  308. ppl->pp_head = pp->next;
  309. if (ppl->pp_hint == pp)
  310. ppl->pp_hint = pp->next;
  311. }
  312. rb_erase(&pp->pp_rb, &ppl->ppl_rb);
  313. kmem_cache_free(partial_page_cachep, pp);
  314. }
  315. static struct partial_page *
  316. __pp_prev(struct partial_page *pp)
  317. {
  318. struct rb_node *prev = rb_prev(&pp->pp_rb);
  319. if (prev)
  320. return rb_entry(prev, struct partial_page, pp_rb);
  321. else
  322. return NULL;
  323. }
  324. /*
  325. * Delete partial pages with address between @start and @end.
  326. * @start and @end are page aligned.
  327. */
  328. static void
  329. __ia32_delete_pp_range(unsigned int start, unsigned int end)
  330. {
  331. struct partial_page *pp, *prev;
  332. struct rb_node **rb_link, *rb_parent;
  333. if (start >= end)
  334. return;
  335. pp = __ia32_find_pp(current->thread.ppl, start, &prev,
  336. &rb_link, &rb_parent);
  337. if (pp)
  338. prev = __pp_prev(pp);
  339. else {
  340. if (prev)
  341. pp = prev->next;
  342. else
  343. pp = current->thread.ppl->pp_head;
  344. }
  345. while (pp && pp->base < end) {
  346. struct partial_page *tmp = pp->next;
  347. __ia32_delete_pp(current->thread.ppl, pp, prev);
  348. pp = tmp;
  349. }
  350. }
  351. /*
  352. * Set the range between @start and @end in bitmap.
  353. * @start and @end should be IA32 page aligned and in the same IA64 page.
  354. */
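/*
 * Worked example, assuming 16KB IA-64 pages and 4KB IA-32 pages: the bitmap
 * then tracks 4 subpages, and setting the range [base+0x1000, base+0x3000)
 * sets bits 1 and 2.  Once all 4 bits are set, the IA-64 page is fully mapped
 * and the partial_page entry is removed again (see the check below).
 */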
  355. static int
  356. __ia32_set_pp(unsigned int start, unsigned int end, int flags)
  357. {
  358. struct partial_page *pp, *prev;
  359. struct rb_node ** rb_link, *rb_parent;
  360. unsigned int pstart, start_bit, end_bit, i;
  361. pstart = PAGE_START(start);
  362. start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
  363. end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
  364. if (end_bit == 0)
  365. end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
  366. pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
  367. &rb_link, &rb_parent);
  368. if (pp) {
  369. for (i = start_bit; i < end_bit; i++)
  370. set_bit(i, &pp->bitmap);
  371. /*
  372. * Check: if this partial page has been set to a full page,
  373. * then delete it.
  374. */
  375. if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
  376. PAGE_SIZE/IA32_PAGE_SIZE) {
  377. __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
  378. }
  379. return 0;
  380. }
	/*
	 * MAP_FIXED may lead to an overlapping mmap.  In this case, the
	 * requested area may already be mapped as a full page, so check the
	 * vma before adding a new partial page.
	 */
  386. if (flags & MAP_FIXED) {
  387. struct vm_area_struct *vma = find_vma(current->mm, pstart);
  388. if (vma && vma->vm_start <= pstart)
  389. return 0;
  390. }
	/* allocate a new partial_page */
  392. pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
  393. if (!pp)
  394. return -ENOMEM;
  395. pp->base = pstart;
  396. pp->bitmap = 0;
  397. for (i=start_bit; i<end_bit; i++)
  398. set_bit(i, &(pp->bitmap));
  399. pp->next = NULL;
  400. __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
  401. return 0;
  402. }
  403. /*
  404. * @start and @end should be IA32 page aligned, but don't need to be in the
  405. * same IA64 page. Split @start and @end to make sure they're in the same IA64
  406. * page, then call __ia32_set_pp().
  407. */
  408. static void
  409. ia32_set_pp(unsigned int start, unsigned int end, int flags)
  410. {
  411. down_write(&current->mm->mmap_sem);
  412. if (flags & MAP_FIXED) {
  413. /*
  414. * MAP_FIXED may lead to overlapping mmap. When this happens,
  415. * a series of complete IA64 pages results in deletion of
  416. * old partial pages in that range.
  417. */
  418. __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
  419. }
  420. if (end < PAGE_ALIGN(start)) {
  421. __ia32_set_pp(start, end, flags);
  422. } else {
  423. if (offset_in_page(start))
  424. __ia32_set_pp(start, PAGE_ALIGN(start), flags);
  425. if (offset_in_page(end))
  426. __ia32_set_pp(PAGE_START(end), end, flags);
  427. }
  428. up_write(&current->mm->mmap_sem);
  429. }
/*
 * Clear the range between @start and @end in the bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 * If the bitmap becomes 0 afterwards, the partial page is freed and 1 is
 * returned; otherwise 0 is returned.
 * If the partial page is not found in the list:
 *   if a vma covers the page, the full page is turned into a partial page;
 *   otherwise -ENOMEM is returned.
 */
  439. static int
  440. __ia32_unset_pp(unsigned int start, unsigned int end)
  441. {
  442. struct partial_page *pp, *prev;
  443. struct rb_node ** rb_link, *rb_parent;
  444. unsigned int pstart, start_bit, end_bit, i;
  445. struct vm_area_struct *vma;
  446. pstart = PAGE_START(start);
  447. start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
  448. end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
  449. if (end_bit == 0)
  450. end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
  451. pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
  452. &rb_link, &rb_parent);
  453. if (pp) {
  454. for (i = start_bit; i < end_bit; i++)
  455. clear_bit(i, &pp->bitmap);
  456. if (pp->bitmap == 0) {
  457. __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
  458. return 1;
  459. }
  460. return 0;
  461. }
  462. vma = find_vma(current->mm, pstart);
  463. if (!vma || vma->vm_start > pstart) {
  464. return -ENOMEM;
  465. }
	/* allocate a new partial_page */
  467. pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
  468. if (!pp)
  469. return -ENOMEM;
  470. pp->base = pstart;
  471. pp->bitmap = 0;
  472. for (i = 0; i < start_bit; i++)
  473. set_bit(i, &(pp->bitmap));
  474. for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++)
  475. set_bit(i, &(pp->bitmap));
  476. pp->next = NULL;
  477. __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
  478. return 0;
  479. }
/*
 * Delete the partial pages between PAGE_ALIGN(start) and PAGE_START(end) by
 * calling __ia32_delete_pp_range(), and clear any remaining partial pages by
 * calling __ia32_unset_pp().
 * For the return value, see __ia32_unset_pp().
 */
  486. static int
  487. ia32_unset_pp(unsigned int *startp, unsigned int *endp)
  488. {
  489. unsigned int start = *startp, end = *endp;
  490. int ret = 0;
  491. down_write(&current->mm->mmap_sem);
  492. __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
  493. if (end < PAGE_ALIGN(start)) {
  494. ret = __ia32_unset_pp(start, end);
  495. if (ret == 1) {
  496. *startp = PAGE_START(start);
  497. *endp = PAGE_ALIGN(end);
  498. }
  499. if (ret == 0) {
  500. /* to shortcut sys_munmap() in sys32_munmap() */
  501. *startp = PAGE_START(start);
  502. *endp = PAGE_START(end);
  503. }
  504. } else {
  505. if (offset_in_page(start)) {
  506. ret = __ia32_unset_pp(start, PAGE_ALIGN(start));
  507. if (ret == 1)
  508. *startp = PAGE_START(start);
  509. if (ret == 0)
  510. *startp = PAGE_ALIGN(start);
  511. if (ret < 0)
  512. goto out;
  513. }
  514. if (offset_in_page(end)) {
  515. ret = __ia32_unset_pp(PAGE_START(end), end);
  516. if (ret == 1)
  517. *endp = PAGE_ALIGN(end);
  518. if (ret == 0)
  519. *endp = PAGE_START(end);
  520. }
  521. }
  522. out:
  523. up_write(&current->mm->mmap_sem);
  524. return ret;
  525. }
  526. /*
  527. * Compare the range between @start and @end with bitmap in partial page.
  528. * @start and @end should be IA32 page aligned and in the same IA64 page.
  529. */
  530. static int
  531. __ia32_compare_pp(unsigned int start, unsigned int end)
  532. {
  533. struct partial_page *pp, *prev;
  534. struct rb_node ** rb_link, *rb_parent;
  535. unsigned int pstart, start_bit, end_bit, size;
  536. unsigned int first_bit, next_zero_bit; /* the first range in bitmap */
  537. pstart = PAGE_START(start);
  538. pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
  539. &rb_link, &rb_parent);
  540. if (!pp)
  541. return 1;
  542. start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
  543. end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
  544. size = sizeof(pp->bitmap) * 8;
  545. first_bit = find_first_bit(&pp->bitmap, size);
  546. next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit);
  547. if ((start_bit < first_bit) || (end_bit > next_zero_bit)) {
  548. /* exceeds the first range in bitmap */
  549. return -ENOMEM;
  550. } else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) {
  551. first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit);
  552. if ((next_zero_bit < first_bit) && (first_bit < size))
  553. return 1; /* has next range */
  554. else
  555. return 0; /* no next range */
  556. } else
  557. return 1;
  558. }
  559. /*
  560. * @start and @end should be IA32 page aligned, but don't need to be in the
  561. * same IA64 page. Split @start and @end to make sure they're in the same IA64
  562. * page, then call __ia32_compare_pp().
  563. *
 * Take this as an example: the range covers the 1st and 2nd 4K pages.
  565. * Return 0 if they fit bitmap exactly, i.e. bitmap = 00000011;
  566. * Return 1 if the range doesn't cover whole bitmap, e.g. bitmap = 00001111;
  567. * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or
  568. * bitmap = 00000101.
  569. */
  570. static int
  571. ia32_compare_pp(unsigned int *startp, unsigned int *endp)
  572. {
  573. unsigned int start = *startp, end = *endp;
  574. int retval = 0;
  575. down_write(&current->mm->mmap_sem);
  576. if (end < PAGE_ALIGN(start)) {
  577. retval = __ia32_compare_pp(start, end);
  578. if (retval == 0) {
  579. *startp = PAGE_START(start);
  580. *endp = PAGE_ALIGN(end);
  581. }
  582. } else {
  583. if (offset_in_page(start)) {
  584. retval = __ia32_compare_pp(start,
  585. PAGE_ALIGN(start));
  586. if (retval == 0)
  587. *startp = PAGE_START(start);
  588. if (retval < 0)
  589. goto out;
  590. }
  591. if (offset_in_page(end)) {
  592. retval = __ia32_compare_pp(PAGE_START(end), end);
  593. if (retval == 0)
  594. *endp = PAGE_ALIGN(end);
  595. }
  596. }
  597. out:
  598. up_write(&current->mm->mmap_sem);
  599. return retval;
  600. }
  601. static void
  602. __ia32_drop_pp_list(struct partial_page_list *ppl)
  603. {
  604. struct partial_page *pp = ppl->pp_head;
  605. while (pp) {
  606. struct partial_page *next = pp->next;
  607. kmem_cache_free(partial_page_cachep, pp);
  608. pp = next;
  609. }
  610. kfree(ppl);
  611. }
  612. void
  613. ia32_drop_partial_page_list(struct task_struct *task)
  614. {
  615. struct partial_page_list* ppl = task->thread.ppl;
  616. if (ppl && atomic_dec_and_test(&ppl->pp_count))
  617. __ia32_drop_pp_list(ppl);
  618. }
  619. /*
  620. * Copy current->thread.ppl to ppl (already initialized).
  621. */
  622. static int
  623. __ia32_copy_pp_list(struct partial_page_list *ppl)
  624. {
  625. struct partial_page *pp, *tmp, *prev;
  626. struct rb_node **rb_link, *rb_parent;
  627. ppl->pp_head = NULL;
  628. ppl->pp_hint = NULL;
  629. ppl->ppl_rb = RB_ROOT;
  630. rb_link = &ppl->ppl_rb.rb_node;
  631. rb_parent = NULL;
  632. prev = NULL;
  633. for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
  634. tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
  635. if (!tmp)
  636. return -ENOMEM;
  637. *tmp = *pp;
  638. __ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent);
  639. prev = tmp;
  640. rb_link = &tmp->pp_rb.rb_right;
  641. rb_parent = &tmp->pp_rb;
  642. }
  643. return 0;
  644. }
  645. int
  646. ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
  647. {
  648. int retval = 0;
  649. if (clone_flags & CLONE_VM) {
  650. atomic_inc(&current->thread.ppl->pp_count);
  651. p->thread.ppl = current->thread.ppl;
  652. } else {
  653. p->thread.ppl = ia32_init_pp_list();
  654. if (!p->thread.ppl)
  655. return -ENOMEM;
  656. down_write(&current->mm->mmap_sem);
  657. {
  658. retval = __ia32_copy_pp_list(p->thread.ppl);
  659. }
  660. up_write(&current->mm->mmap_sem);
  661. }
  662. return retval;
  663. }
  664. static unsigned long
  665. emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
  666. loff_t off)
  667. {
  668. unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
  669. struct inode *inode;
  670. loff_t poff;
  671. end = start + len;
  672. pstart = PAGE_START(start);
  673. pend = PAGE_ALIGN(end);
  674. if (flags & MAP_FIXED) {
  675. ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
  676. if (start > pstart) {
  677. if (flags & MAP_SHARED)
  678. printk(KERN_INFO
  679. "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
  680. current->comm, current->pid, start);
  681. ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
  682. off);
  683. if (IS_ERR((void *) ret))
  684. return ret;
  685. pstart += PAGE_SIZE;
  686. if (pstart >= pend)
  687. goto out; /* done */
  688. }
  689. if (end < pend) {
  690. if (flags & MAP_SHARED)
  691. printk(KERN_INFO
  692. "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
  693. current->comm, current->pid, end);
  694. ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
  695. (off + len) - offset_in_page(end));
  696. if (IS_ERR((void *) ret))
  697. return ret;
  698. pend -= PAGE_SIZE;
  699. if (pstart >= pend)
  700. goto out; /* done */
  701. }
  702. } else {
  703. /*
  704. * If a start address was specified, use it if the entire rounded out area
  705. * is available.
  706. */
  707. if (start && !pstart)
  708. fudge = 1; /* handle case of mapping to range (0,PAGE_SIZE) */
  709. tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
  710. if (tmp != pstart) {
  711. pstart = tmp;
  712. start = pstart + offset_in_page(off); /* make start congruent with off */
  713. end = start + len;
  714. pend = PAGE_ALIGN(end);
  715. }
  716. }
  717. poff = off + (pstart - start); /* note: (pstart - start) may be negative */
  718. is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);
  719. if ((flags & MAP_SHARED) && !is_congruent)
  720. printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
  721. "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off);
  722. DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
  723. is_congruent ? "congruent" : "not congruent", poff);
  724. down_write(&current->mm->mmap_sem);
  725. {
  726. if (!(flags & MAP_ANONYMOUS) && is_congruent)
  727. ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
  728. else
  729. ret = do_mmap(NULL, pstart, pend - pstart,
  730. prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
  731. flags | MAP_FIXED | MAP_ANONYMOUS, 0);
  732. }
  733. up_write(&current->mm->mmap_sem);
  734. if (IS_ERR((void *) ret))
  735. return ret;
  736. if (!is_congruent) {
  737. /* read the file contents */
  738. inode = file->f_dentry->d_inode;
  739. if (!inode->i_fop || !file->f_op->read
  740. || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
  741. < 0))
  742. {
  743. sys_munmap(pstart, pend - pstart);
  744. return -EINVAL;
  745. }
  746. if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
  747. return -EINVAL;
  748. }
  749. if (!(flags & MAP_FIXED))
  750. ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
  751. out:
  752. return start;
  753. }
  754. #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
  755. static inline unsigned int
  756. get_prot32 (unsigned int prot)
  757. {
	if (prot & PROT_WRITE)
		/* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
		prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
	else if (prot & (PROT_READ | PROT_EXEC))
		/* on x86, there is no distinction between PROT_READ and PROT_EXEC */
		prot |= (PROT_READ | PROT_EXEC);
  764. return prot;
  765. }
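/*
 * For example, get_prot32(PROT_WRITE) yields PROT_READ|PROT_WRITE|PROT_EXEC
 * and get_prot32(PROT_READ) yields PROT_READ|PROT_EXEC, matching the i386
 * page-table semantics noted above.
 */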
  766. unsigned long
  767. ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
  768. loff_t offset)
  769. {
  770. DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
  771. file, addr, len, prot, flags, offset);
  772. if (file && (!file->f_op || !file->f_op->mmap))
  773. return -ENODEV;
  774. len = IA32_PAGE_ALIGN(len);
  775. if (len == 0)
  776. return addr;
  777. if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
  778. {
  779. if (flags & MAP_FIXED)
  780. return -ENOMEM;
  781. else
  782. return -EINVAL;
  783. }
  784. if (OFFSET4K(offset))
  785. return -EINVAL;
  786. prot = get_prot32(prot);
  787. #if PAGE_SHIFT > IA32_PAGE_SHIFT
  788. mutex_lock(&ia32_mmap_mutex);
  789. {
  790. addr = emulate_mmap(file, addr, len, prot, flags, offset);
  791. }
  792. mutex_unlock(&ia32_mmap_mutex);
  793. #else
  794. down_write(&current->mm->mmap_sem);
  795. {
  796. addr = do_mmap(file, addr, len, prot, flags, offset);
  797. }
  798. up_write(&current->mm->mmap_sem);
  799. #endif
  800. DBG("ia32_do_mmap: returning 0x%lx\n", addr);
  801. return addr;
  802. }
/*
 * Linux/i386 originally could not handle more than 4 system call parameters,
 * so these system calls used a memory block for parameter passing.
 */
  807. struct mmap_arg_struct {
  808. unsigned int addr;
  809. unsigned int len;
  810. unsigned int prot;
  811. unsigned int flags;
  812. unsigned int fd;
  813. unsigned int offset;
  814. };
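/*
 * Hence sys32_mmap() below takes a single pointer to such a block in user
 * memory, while sys32_mmap2() receives all six arguments in registers and
 * expresses the file offset in 4KB-page units (pgoff << IA32_PAGE_SHIFT).
 */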
  815. asmlinkage long
  816. sys32_mmap (struct mmap_arg_struct __user *arg)
  817. {
  818. struct mmap_arg_struct a;
  819. struct file *file = NULL;
  820. unsigned long addr;
  821. int flags;
  822. if (copy_from_user(&a, arg, sizeof(a)))
  823. return -EFAULT;
  824. if (OFFSET4K(a.offset))
  825. return -EINVAL;
  826. flags = a.flags;
  827. flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
  828. if (!(flags & MAP_ANONYMOUS)) {
  829. file = fget(a.fd);
  830. if (!file)
  831. return -EBADF;
  832. }
  833. addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);
  834. if (file)
  835. fput(file);
  836. return addr;
  837. }
  838. asmlinkage long
  839. sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
  840. unsigned int fd, unsigned int pgoff)
  841. {
  842. struct file *file = NULL;
  843. unsigned long retval;
  844. flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
  845. if (!(flags & MAP_ANONYMOUS)) {
  846. file = fget(fd);
  847. if (!file)
  848. return -EBADF;
  849. }
  850. retval = ia32_do_mmap(file, addr, len, prot, flags,
  851. (unsigned long) pgoff << IA32_PAGE_SHIFT);
  852. if (file)
  853. fput(file);
  854. return retval;
  855. }
  856. asmlinkage long
  857. sys32_munmap (unsigned int start, unsigned int len)
  858. {
  859. unsigned int end = start + len;
  860. long ret;
  861. #if PAGE_SHIFT <= IA32_PAGE_SHIFT
  862. ret = sys_munmap(start, end - start);
  863. #else
  864. if (OFFSET4K(start))
  865. return -EINVAL;
  866. end = IA32_PAGE_ALIGN(end);
  867. if (start >= end)
  868. return -EINVAL;
  869. ret = ia32_unset_pp(&start, &end);
  870. if (ret < 0)
  871. return ret;
  872. if (start >= end)
  873. return 0;
  874. mutex_lock(&ia32_mmap_mutex);
  875. ret = sys_munmap(start, end - start);
  876. mutex_unlock(&ia32_mmap_mutex);
  877. #endif
  878. return ret;
  879. }
  880. #if PAGE_SHIFT > IA32_PAGE_SHIFT
  881. /*
  882. * When mprotect()ing a partial page, we set the permission to the union of the old
  883. * settings and the new settings. In other words, it's only possible to make access to a
  884. * partial page less restrictive.
  885. */
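/*
 * Example: if one 4KB piece of an IA-64 page is mapped readable and another
 * piece is now mprotect()ed to be writable, the whole underlying IA-64 page
 * ends up with the union of the two protections, because native protection
 * can only be changed at IA-64 page granularity.
 */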
  886. static long
  887. mprotect_subpage (unsigned long address, int new_prot)
  888. {
  889. int old_prot;
  890. struct vm_area_struct *vma;
  891. if (new_prot == PROT_NONE)
  892. return 0; /* optimize case where nothing changes... */
  893. vma = find_vma(current->mm, address);
  894. old_prot = get_page_prot(vma, address);
  895. return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
  896. }
  897. #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
  898. asmlinkage long
  899. sys32_mprotect (unsigned int start, unsigned int len, int prot)
  900. {
  901. unsigned int end = start + len;
  902. #if PAGE_SHIFT > IA32_PAGE_SHIFT
  903. long retval = 0;
  904. #endif
  905. prot = get_prot32(prot);
  906. #if PAGE_SHIFT <= IA32_PAGE_SHIFT
  907. return sys_mprotect(start, end - start, prot);
  908. #else
  909. if (OFFSET4K(start))
  910. return -EINVAL;
  911. end = IA32_PAGE_ALIGN(end);
  912. if (end < start)
  913. return -EINVAL;
  914. retval = ia32_compare_pp(&start, &end);
  915. if (retval < 0)
  916. return retval;
  917. mutex_lock(&ia32_mmap_mutex);
  918. {
  919. if (offset_in_page(start)) {
  920. /* start address is 4KB aligned but not page aligned. */
  921. retval = mprotect_subpage(PAGE_START(start), prot);
  922. if (retval < 0)
  923. goto out;
  924. start = PAGE_ALIGN(start);
  925. if (start >= end)
  926. goto out; /* retval is already zero... */
  927. }
  928. if (offset_in_page(end)) {
  929. /* end address is 4KB aligned but not page aligned. */
  930. retval = mprotect_subpage(PAGE_START(end), prot);
  931. if (retval < 0)
  932. goto out;
  933. end = PAGE_START(end);
  934. }
  935. retval = sys_mprotect(start, end - start, prot);
  936. }
  937. out:
  938. mutex_unlock(&ia32_mmap_mutex);
  939. return retval;
  940. #endif
  941. }
  942. asmlinkage long
  943. sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
  944. unsigned int flags, unsigned int new_addr)
  945. {
  946. long ret;
  947. #if PAGE_SHIFT <= IA32_PAGE_SHIFT
  948. ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
  949. #else
  950. unsigned int old_end, new_end;
  951. if (OFFSET4K(addr))
  952. return -EINVAL;
  953. old_len = IA32_PAGE_ALIGN(old_len);
  954. new_len = IA32_PAGE_ALIGN(new_len);
  955. old_end = addr + old_len;
  956. new_end = addr + new_len;
  957. if (!new_len)
  958. return -EINVAL;
  959. if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr)))
  960. return -EINVAL;
  961. if (old_len >= new_len) {
  962. ret = sys32_munmap(addr + new_len, old_len - new_len);
  963. if (ret && old_len != new_len)
  964. return ret;
  965. ret = addr;
  966. if (!(flags & MREMAP_FIXED) || (new_addr == addr))
  967. return ret;
  968. old_len = new_len;
  969. }
  970. addr = PAGE_START(addr);
  971. old_len = PAGE_ALIGN(old_end) - addr;
  972. new_len = PAGE_ALIGN(new_end) - addr;
  973. mutex_lock(&ia32_mmap_mutex);
  974. ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
  975. mutex_unlock(&ia32_mmap_mutex);
  976. if ((ret >= 0) && (old_len < new_len)) {
  977. /* mremap expanded successfully */
  978. ia32_set_pp(old_end, new_end, flags);
  979. }
  980. #endif
  981. return ret;
  982. }
  983. asmlinkage long
  984. sys32_pipe (int __user *fd)
  985. {
  986. int retval;
  987. int fds[2];
  988. retval = do_pipe(fds);
  989. if (retval)
  990. goto out;
  991. if (copy_to_user(fd, fds, sizeof(fds)))
  992. retval = -EFAULT;
  993. out:
  994. return retval;
  995. }
  996. static inline long
  997. get_tv32 (struct timeval *o, struct compat_timeval __user *i)
  998. {
  999. return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
  1000. (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec)));
  1001. }
  1002. static inline long
  1003. put_tv32 (struct compat_timeval __user *o, struct timeval *i)
  1004. {
  1005. return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
  1006. (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec)));
  1007. }
  1008. asmlinkage unsigned long
  1009. sys32_alarm (unsigned int seconds)
  1010. {
  1011. struct itimerval it_new, it_old;
  1012. unsigned int oldalarm;
  1013. it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
  1014. it_new.it_value.tv_sec = seconds;
  1015. it_new.it_value.tv_usec = 0;
  1016. do_setitimer(ITIMER_REAL, &it_new, &it_old);
  1017. oldalarm = it_old.it_value.tv_sec;
  1018. /* ehhh.. We can't return 0 if we have an alarm pending.. */
  1019. /* And we'd better return too much than too little anyway */
  1020. if (it_old.it_value.tv_usec)
  1021. oldalarm++;
  1022. return oldalarm;
  1023. }
/* Translations due to time_t size differences, which affect all
   sorts of things, like timeval and itimerval.  */
  1026. extern struct timezone sys_tz;
  1027. asmlinkage long
  1028. sys32_gettimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
  1029. {
  1030. if (tv) {
  1031. struct timeval ktv;
  1032. do_gettimeofday(&ktv);
  1033. if (put_tv32(tv, &ktv))
  1034. return -EFAULT;
  1035. }
  1036. if (tz) {
  1037. if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
  1038. return -EFAULT;
  1039. }
  1040. return 0;
  1041. }
  1042. asmlinkage long
  1043. sys32_settimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
  1044. {
  1045. struct timeval ktv;
  1046. struct timespec kts;
  1047. struct timezone ktz;
  1048. if (tv) {
  1049. if (get_tv32(&ktv, tv))
  1050. return -EFAULT;
  1051. kts.tv_sec = ktv.tv_sec;
  1052. kts.tv_nsec = ktv.tv_usec * 1000;
  1053. }
  1054. if (tz) {
  1055. if (copy_from_user(&ktz, tz, sizeof(ktz)))
  1056. return -EFAULT;
  1057. }
  1058. return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
  1059. }
  1060. struct getdents32_callback {
  1061. struct compat_dirent __user *current_dir;
  1062. struct compat_dirent __user *previous;
  1063. int count;
  1064. int error;
  1065. };
  1066. struct readdir32_callback {
  1067. struct old_linux32_dirent __user * dirent;
  1068. int count;
  1069. };
  1070. static int
  1071. filldir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino,
  1072. unsigned int d_type)
  1073. {
  1074. struct compat_dirent __user * dirent;
  1075. struct getdents32_callback * buf = (struct getdents32_callback *) __buf;
  1076. int reclen = ROUND_UP(offsetof(struct compat_dirent, d_name) + namlen + 1, 4);
  1077. buf->error = -EINVAL; /* only used if we fail.. */
  1078. if (reclen > buf->count)
  1079. return -EINVAL;
  1080. buf->error = -EFAULT; /* only used if we fail.. */
  1081. dirent = buf->previous;
  1082. if (dirent)
  1083. if (put_user(offset, &dirent->d_off))
  1084. return -EFAULT;
  1085. dirent = buf->current_dir;
  1086. buf->previous = dirent;
  1087. if (put_user(ino, &dirent->d_ino)
  1088. || put_user(reclen, &dirent->d_reclen)
  1089. || copy_to_user(dirent->d_name, name, namlen)
  1090. || put_user(0, dirent->d_name + namlen))
  1091. return -EFAULT;
  1092. dirent = (struct compat_dirent __user *) ((char __user *) dirent + reclen);
  1093. buf->current_dir = dirent;
  1094. buf->count -= reclen;
  1095. return 0;
  1096. }
  1097. asmlinkage long
  1098. sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned int count)
  1099. {
  1100. struct file * file;
  1101. struct compat_dirent __user * lastdirent;
  1102. struct getdents32_callback buf;
  1103. int error;
  1104. error = -EBADF;
  1105. file = fget(fd);
  1106. if (!file)
  1107. goto out;
  1108. buf.current_dir = dirent;
  1109. buf.previous = NULL;
  1110. buf.count = count;
  1111. buf.error = 0;
  1112. error = vfs_readdir(file, filldir32, &buf);
  1113. if (error < 0)
  1114. goto out_putf;
  1115. error = buf.error;
  1116. lastdirent = buf.previous;
  1117. if (lastdirent) {
  1118. error = -EINVAL;
  1119. if (put_user(file->f_pos, &lastdirent->d_off))
  1120. goto out_putf;
  1121. error = count - buf.count;
  1122. }
  1123. out_putf:
  1124. fput(file);
  1125. out:
  1126. return error;
  1127. }
  1128. static int
  1129. fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
  1130. unsigned int d_type)
  1131. {
  1132. struct readdir32_callback * buf = (struct readdir32_callback *) __buf;
  1133. struct old_linux32_dirent __user * dirent;
  1134. if (buf->count)
  1135. return -EINVAL;
  1136. buf->count++;
  1137. dirent = buf->dirent;
  1138. if (put_user(ino, &dirent->d_ino)
  1139. || put_user(offset, &dirent->d_offset)
  1140. || put_user(namlen, &dirent->d_namlen)
  1141. || copy_to_user(dirent->d_name, name, namlen)
  1142. || put_user(0, dirent->d_name + namlen))
  1143. return -EFAULT;
  1144. return 0;
  1145. }
  1146. asmlinkage long
  1147. sys32_readdir (unsigned int fd, void __user *dirent, unsigned int count)
  1148. {
  1149. int error;
  1150. struct file * file;
  1151. struct readdir32_callback buf;
  1152. error = -EBADF;
  1153. file = fget(fd);
  1154. if (!file)
  1155. goto out;
  1156. buf.count = 0;
  1157. buf.dirent = dirent;
  1158. error = vfs_readdir(file, fillonedir32, &buf);
  1159. if (error >= 0)
  1160. error = buf.count;
  1161. fput(file);
  1162. out:
  1163. return error;
  1164. }
  1165. struct sel_arg_struct {
  1166. unsigned int n;
  1167. unsigned int inp;
  1168. unsigned int outp;
  1169. unsigned int exp;
  1170. unsigned int tvp;
  1171. };
  1172. asmlinkage long
  1173. sys32_old_select (struct sel_arg_struct __user *arg)
  1174. {
  1175. struct sel_arg_struct a;
  1176. if (copy_from_user(&a, arg, sizeof(a)))
  1177. return -EFAULT;
  1178. return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
  1179. compat_ptr(a.exp), compat_ptr(a.tvp));
  1180. }
  1181. #define SEMOP 1
  1182. #define SEMGET 2
  1183. #define SEMCTL 3
  1184. #define SEMTIMEDOP 4
  1185. #define MSGSND 11
  1186. #define MSGRCV 12
  1187. #define MSGGET 13
  1188. #define MSGCTL 14
  1189. #define SHMAT 21
  1190. #define SHMDT 22
  1191. #define SHMGET 23
  1192. #define SHMCTL 24
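/*
 * sys32_ipc() demultiplexes the single i386 ipc(2) system call into the
 * individual SysV IPC calls: the low 16 bits of @call select the operation
 * (values above) and the upper 16 bits carry the IPC ABI version.
 */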
  1193. asmlinkage long
  1194. sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
  1195. {
  1196. int version;
  1197. version = call >> 16; /* hack for backward compatibility */
  1198. call &= 0xffff;
  1199. switch (call) {
  1200. case SEMTIMEDOP:
  1201. if (fifth)
  1202. return compat_sys_semtimedop(first, compat_ptr(ptr),
  1203. second, compat_ptr(fifth));
  1204. /* else fall through for normal semop() */
  1205. case SEMOP:
  1206. /* struct sembuf is the same on 32 and 64bit :)) */
  1207. return sys_semtimedop(first, compat_ptr(ptr), second,
  1208. NULL);
  1209. case SEMGET:
  1210. return sys_semget(first, second, third);
  1211. case SEMCTL:
  1212. return compat_sys_semctl(first, second, third, compat_ptr(ptr));
  1213. case MSGSND:
  1214. return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
  1215. case MSGRCV:
  1216. return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr));
  1217. case MSGGET:
  1218. return sys_msgget((key_t) first, second);
  1219. case MSGCTL:
  1220. return compat_sys_msgctl(first, second, compat_ptr(ptr));
  1221. case SHMAT:
  1222. return compat_sys_shmat(first, second, third, version, compat_ptr(ptr));
  1223. break;
  1224. case SHMDT:
  1225. return sys_shmdt(compat_ptr(ptr));
  1226. case SHMGET:
  1227. return sys_shmget(first, (unsigned)second, third);
  1228. case SHMCTL:
  1229. return compat_sys_shmctl(first, second, compat_ptr(ptr));
  1230. default:
  1231. return -ENOSYS;
  1232. }
  1233. return -EINVAL;
  1234. }
  1235. asmlinkage long
  1236. compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options,
  1237. struct compat_rusage *ru);
  1238. asmlinkage long
  1239. sys32_waitpid (int pid, unsigned int *stat_addr, int options)
  1240. {
  1241. return compat_sys_wait4(pid, stat_addr, options, NULL);
  1242. }
  1243. static unsigned int
  1244. ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
  1245. {
  1246. size_t copied;
  1247. unsigned int ret;
  1248. copied = access_process_vm(child, addr, val, sizeof(*val), 0);
  1249. return (copied != sizeof(ret)) ? -EIO : 0;
  1250. }
  1251. static unsigned int
  1252. ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
  1253. {
  1254. if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
  1255. return -EIO;
  1256. return 0;
  1257. }
  1258. /*
  1259. * The order in which registers are stored in the ptrace regs structure
  1260. */
  1261. #define PT_EBX 0
  1262. #define PT_ECX 1
  1263. #define PT_EDX 2
  1264. #define PT_ESI 3
  1265. #define PT_EDI 4
  1266. #define PT_EBP 5
  1267. #define PT_EAX 6
  1268. #define PT_DS 7
  1269. #define PT_ES 8
  1270. #define PT_FS 9
  1271. #define PT_GS 10
  1272. #define PT_ORIG_EAX 11
  1273. #define PT_EIP 12
  1274. #define PT_CS 13
  1275. #define PT_EFL 14
  1276. #define PT_UESP 15
  1277. #define PT_SS 16
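/*
 * getreg()/putreg() below map these i386 pt_regs slots onto the IA-64
 * registers used by the IA-32 execution layer: e.g. EAX lives in r8, EBX in
 * r11, EIP in cr_iip and the user stack pointer in r12.
 */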
  1278. static unsigned int
  1279. getreg (struct task_struct *child, int regno)
  1280. {
  1281. struct pt_regs *child_regs;
  1282. child_regs = task_pt_regs(child);
  1283. switch (regno / sizeof(int)) {
  1284. case PT_EBX: return child_regs->r11;
  1285. case PT_ECX: return child_regs->r9;
  1286. case PT_EDX: return child_regs->r10;
  1287. case PT_ESI: return child_regs->r14;
  1288. case PT_EDI: return child_regs->r15;
  1289. case PT_EBP: return child_regs->r13;
  1290. case PT_EAX: return child_regs->r8;
  1291. case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
  1292. case PT_EIP: return child_regs->cr_iip;
  1293. case PT_UESP: return child_regs->r12;
  1294. case PT_EFL: return child->thread.eflag;
  1295. case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
  1296. return __USER_DS;
  1297. case PT_CS: return __USER_CS;
  1298. default:
  1299. printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
  1300. break;
  1301. }
  1302. return 0;
  1303. }
  1304. static void
  1305. putreg (struct task_struct *child, int regno, unsigned int value)
  1306. {
  1307. struct pt_regs *child_regs;
  1308. child_regs = task_pt_regs(child);
  1309. switch (regno / sizeof(int)) {
  1310. case PT_EBX: child_regs->r11 = value; break;
  1311. case PT_ECX: child_regs->r9 = value; break;
  1312. case PT_EDX: child_regs->r10 = value; break;
  1313. case PT_ESI: child_regs->r14 = value; break;
  1314. case PT_EDI: child_regs->r15 = value; break;
  1315. case PT_EBP: child_regs->r13 = value; break;
  1316. case PT_EAX: child_regs->r8 = value; break;
  1317. case PT_ORIG_EAX: child_regs->r1 = value; break;
  1318. case PT_EIP: child_regs->cr_iip = value; break;
  1319. case PT_UESP: child_regs->r12 = value; break;
  1320. case PT_EFL: child->thread.eflag = value; break;
  1321. case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
  1322. if (value != __USER_DS)
  1323. printk(KERN_ERR
  1324. "ia32.putreg: attempt to set invalid segment register %d = %x\n",
  1325. regno, value);
  1326. break;
	      case PT_CS:
		if (value != __USER_CS)
			printk(KERN_ERR
			       "ia32.putreg: attempt to set invalid segment register %d = %x\n",
			       regno, value);
		break;
  1333. default:
  1334. printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
  1335. break;
  1336. }
  1337. }
  1338. static void
  1339. put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
  1340. struct switch_stack *swp, int tos)
  1341. {
  1342. struct _fpreg_ia32 *f;
  1343. char buf[32];
  1344. f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
  1345. if ((regno += tos) >= 8)
  1346. regno -= 8;
  1347. switch (regno) {
  1348. case 0:
  1349. ia64f2ia32f(f, &ptp->f8);
  1350. break;
  1351. case 1:
  1352. ia64f2ia32f(f, &ptp->f9);
  1353. break;
  1354. case 2:
  1355. ia64f2ia32f(f, &ptp->f10);
  1356. break;
  1357. case 3:
  1358. ia64f2ia32f(f, &ptp->f11);
  1359. break;
  1360. case 4:
  1361. case 5:
  1362. case 6:
  1363. case 7:
  1364. ia64f2ia32f(f, &swp->f12 + (regno - 4));
  1365. break;
  1366. }
  1367. copy_to_user(reg, f, sizeof(*reg));
  1368. }
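/*
 * The IA-32 floating-point stack is kept in f8-f15: physical FP registers
 * 0-3 live in pt_regs (f8-f11) and registers 4-7 in the switch_stack
 * (f12-f15).  The "(regno += tos) >= 8" adjustment above converts a logical
 * st(i) index into the physical register number by rotating it by the
 * top-of-stack value.
 */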
static void
get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
           struct switch_stack *swp, int tos)
{
        if ((regno += tos) >= 8)
                regno -= 8;
        switch (regno) {
        case 0:
                copy_from_user(&ptp->f8, reg, sizeof(*reg));
                break;
        case 1:
                copy_from_user(&ptp->f9, reg, sizeof(*reg));
                break;
        case 2:
                copy_from_user(&ptp->f10, reg, sizeof(*reg));
                break;
        case 3:
                copy_from_user(&ptp->f11, reg, sizeof(*reg));
                break;
        case 4:
        case 5:
        case 6:
        case 7:
                copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
                break;
        }
        return;
}

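/*
 * Convert the ia64 FP state of 'tsk' into the IA-32 fsave layout of
 * struct ia32_user_i387_struct (control/status/tag words plus the eight
 * stack registers); restore_ia32_fpstate() below does the reverse.
 */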
int
save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;

        if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
                return -EFAULT;

        __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
        __put_user(tsk->thread.fsr & 0xffff, &save->swd);
        __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
        __put_user(tsk->thread.fir, &save->fip);
        __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
        __put_user(tsk->thread.fdr, &save->foo);
        __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

        /*
         * Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                put_fpreg(i, &save->st_space[i], ptp, swp, tos);
        return 0;
}

static int
restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;
        unsigned int fsrlo, fsrhi, num32;

        if (!access_ok(VERIFY_READ, save, sizeof(*save)))
                return -EFAULT;

        __get_user(num32, (unsigned int __user *)&save->cwd);
        tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
        __get_user(fsrlo, (unsigned int __user *)&save->swd);
        __get_user(fsrhi, (unsigned int __user *)&save->twd);
        num32 = (fsrhi << 16) | fsrlo;
        tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->fip);
        tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->foo);
        tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

        /*
         * Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                get_fpreg(i, &save->st_space[i], ptp, swp, tos);
        return 0;
}

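/*
 * Same conversion for the fxsave layout (ia32_user_fxsr_struct), which also
 * carries MXCSR and the eight XMM registers held in f16-f31 of the
 * switch_stack.
 */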
int
save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;
        unsigned long mxcsr = 0;
        unsigned long num128[2];

        if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
                return -EFAULT;

        __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
        __put_user(tsk->thread.fsr & 0xffff, &save->swd);
        __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
        __put_user(tsk->thread.fir, &save->fip);
        __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
        __put_user(tsk->thread.fdr, &save->foo);
        __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

        /*
         * Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

        mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
        __put_user(mxcsr & 0xffff, &save->mxcsr);
        for (i = 0; i < 8; i++) {
                memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
                memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
                copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
        }
        return 0;
}

static int
restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;
        unsigned int fsrlo, fsrhi, num32;
        int mxcsr;
        unsigned long num64;
        unsigned long num128[2];

        if (!access_ok(VERIFY_READ, save, sizeof(*save)))
                return -EFAULT;

        __get_user(num32, (unsigned int __user *)&save->cwd);
        tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
        __get_user(fsrlo, (unsigned int __user *)&save->swd);
        __get_user(fsrhi, (unsigned int __user *)&save->twd);
        num32 = (fsrhi << 16) | fsrlo;
        tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->fip);
        tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->foo);
        tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

        /*
         * Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

        __get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
        num64 = mxcsr & 0xff10;
        tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32);
        num64 = mxcsr & 0x3f;
        tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32);

        for (i = 0; i < 8; i++) {
                copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
                memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
                memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
        }
        return 0;
}

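/*
 * IA-32 ptrace emulation: PEEK/POKE of text, data and the USER area are
 * handled here with the register and FP-state helpers above; requests that
 * need no conversion are passed straight to sys_ptrace()/ptrace_request().
 */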
asmlinkage long
sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
{
        struct task_struct *child;
        unsigned int value, tmp;
        long i, ret;

        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = sys_ptrace(request, pid, addr, data);
                goto out_tsk;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (ret < 0)
                goto out_tsk;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:	/* read word at location addr */
                ret = ia32_peek(child, addr, &value);
                if (ret == 0)
                        ret = put_user(value, (unsigned int __user *) compat_ptr(data));
                else
                        ret = -EIO;
                goto out_tsk;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:	/* write the word at location addr */
                ret = ia32_poke(child, addr, data);
                goto out_tsk;

        case PTRACE_PEEKUSR:	/* read word at addr in USER area */
                ret = -EIO;
                if ((addr & 3) || addr > 17*sizeof(int))
                        break;
                tmp = getreg(child, addr);
                if (!put_user(tmp, (unsigned int __user *) compat_ptr(data)))
                        ret = 0;
                break;

        case PTRACE_POKEUSR:	/* write word at addr in USER area */
                ret = -EIO;
                if ((addr & 3) || addr > 17*sizeof(int))
                        break;
                putreg(child, addr, data);
                ret = 0;
                break;

        case IA32_PTRACE_GETREGS:
                if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) {
                        ret = -EIO;
                        break;
                }
                for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
                        put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data));
                        data += sizeof(int);
                }
                ret = 0;
                break;

        case IA32_PTRACE_SETREGS:
                if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) {
                        ret = -EIO;
                        break;
                }
                for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
                        get_user(tmp, (unsigned int __user *) compat_ptr(data));
                        putreg(child, i, tmp);
                        data += sizeof(int);
                }
                ret = 0;
                break;

        case IA32_PTRACE_GETFPREGS:
                ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
                                        compat_ptr(data));
                break;

        case IA32_PTRACE_GETFPXREGS:
                ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
                                         compat_ptr(data));
                break;

        case IA32_PTRACE_SETFPREGS:
                ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
                                           compat_ptr(data));
                break;

        case IA32_PTRACE_SETFPXREGS:
                ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
                                            compat_ptr(data));
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data));
                break;

        case PTRACE_SYSCALL:	/* continue, stop after next syscall */
        case PTRACE_CONT:	/* restart after signal. */
        case PTRACE_KILL:
        case PTRACE_SINGLESTEP:	/* execute child for one instruction */
        case PTRACE_DETACH:	/* detach a process */
                ret = sys_ptrace(request, pid, addr, data);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }
  out_tsk:
        put_task_struct(child);
  out:
        unlock_kernel();
        return ret;
}

typedef struct {
        unsigned int ss_sp;
        unsigned int ss_flags;
        unsigned int ss_size;
} ia32_stack_t;

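/*
 * sigaltstack() with a 32-bit stack_t: copy the IA-32 stack descriptor in,
 * run do_sigaltstack() under KERNEL_DS, and copy the old descriptor back out.
 */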
asmlinkage long
sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
                   long arg2, long arg3, long arg4, long arg5, long arg6,
                   long arg7, struct pt_regs pt)
{
        stack_t uss, uoss;
        ia32_stack_t buf32;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (uss32) {
                if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
                        return -EFAULT;
                uss.ss_sp = (void __user *) (long) buf32.ss_sp;
                uss.ss_flags = buf32.ss_flags;
                /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the
                   check and set it to the user requested value later */
                if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
                        ret = -ENOMEM;
                        goto out;
                }
                uss.ss_size = MINSIGSTKSZ;
        }
        set_fs(KERNEL_DS);
        ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
                             (stack_t __user *) &uoss, pt.r12);
        current->sas_ss_size = buf32.ss_size;
        set_fs(old_fs);
  out:
        if (ret < 0)
                return ret;
        if (uoss32) {
                buf32.ss_sp = (long __user) uoss.ss_sp;
                buf32.ss_flags = uoss.ss_flags;
                buf32.ss_size = uoss.ss_size;
                if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
                        return -EFAULT;
        }
        return ret;
}

asmlinkage int
sys32_pause (void)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        return -ERESTARTNOHAND;
}

asmlinkage int
sys32_msync (unsigned int start, unsigned int len, int flags)
{
        unsigned int addr;

        if (OFFSET4K(start))
                return -EINVAL;
        addr = PAGE_START(start);
        return sys_msync(addr, len + (start - addr), flags);
}

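/* 32-bit layout of the sysctl() argument block; pointer fields are 32-bit
   user addresses converted with compat_ptr() below. */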
struct sysctl32 {
        unsigned int name;
        int nlen;
        unsigned int oldval;
        unsigned int oldlenp;
        unsigned int newval;
        unsigned int newlen;
        unsigned int __unused[4];
};

#ifdef CONFIG_SYSCTL
asmlinkage long
sys32_sysctl (struct sysctl32 __user *args)
{
        struct sysctl32 a32;
        mm_segment_t old_fs = get_fs();
        void __user *oldvalp, *newvalp;
        size_t oldlen;
        int __user *namep;
        long ret;

        if (copy_from_user(&a32, args, sizeof(a32)))
                return -EFAULT;

        /*
         * We need to pre-validate these because we have to disable address checking
         * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
         * user specifying bad addresses here.  Well, since we're dealing with 32 bit
         * addresses, we KNOW that access_ok() will always succeed, so this is an
         * expensive NOP, but so what...
         */
        namep = (int __user *) compat_ptr(a32.name);
        oldvalp = compat_ptr(a32.oldval);
        newvalp = compat_ptr(a32.newval);

        if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
            || !access_ok(VERIFY_WRITE, namep, 0)
            || !access_ok(VERIFY_WRITE, oldvalp, 0)
            || !access_ok(VERIFY_WRITE, newvalp, 0))
                return -EFAULT;

        set_fs(KERNEL_DS);
        lock_kernel();
        ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen,
                        newvalp, (size_t) a32.newlen);
        unlock_kernel();
        set_fs(old_fs);

        if (oldvalp && put_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
                return -EFAULT;

        return ret;
}
#endif

asmlinkage long
sys32_newuname (struct new_utsname __user *name)
{
        int ret = sys_newuname(name);

        if (!ret)
                if (copy_to_user(name->machine, "i686\0\0\0", 8))
                        ret = -EFAULT;
        return ret;
}

asmlinkage long
sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid)
{
        uid_t a, b, c;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c);
        set_fs(old_fs);

        if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
                return -EFAULT;
        return ret;
}

asmlinkage long
sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
{
        gid_t a, b, c;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c);
        set_fs(old_fs);

        if (ret)
                return ret;

        return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
}

asmlinkage long
sys32_lseek (unsigned int fd, int offset, unsigned int whence)
{
        /* Sign-extension of "offset" is important here... */
        return sys_lseek(fd, offset, whence);
}

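/*
 * Helpers for the legacy 16-bit group-ID calls: copy a group_info to or from
 * a user array of 16-bit GIDs, truncating or widening each entry as needed.
 */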
static int
groups16_to_user(short __user *grouplist, struct group_info *group_info)
{
        int i;
        short group;

        for (i = 0; i < group_info->ngroups; i++) {
                group = (short)GROUP_AT(group_info, i);
                if (put_user(group, grouplist+i))
                        return -EFAULT;
        }

        return 0;
}

static int
groups16_from_user(struct group_info *group_info, short __user *grouplist)
{
        int i;
        short group;

        for (i = 0; i < group_info->ngroups; i++) {
                if (get_user(group, grouplist+i))
                        return -EFAULT;
                GROUP_AT(group_info, i) = (gid_t)group;
        }

        return 0;
}

asmlinkage long
sys32_getgroups16 (int gidsetsize, short __user *grouplist)
{
        int i;

        if (gidsetsize < 0)
                return -EINVAL;

        get_group_info(current->group_info);
        i = current->group_info->ngroups;
        if (gidsetsize) {
                if (i > gidsetsize) {
                        i = -EINVAL;
                        goto out;
                }
                if (groups16_to_user(grouplist, current->group_info)) {
                        i = -EFAULT;
                        goto out;
                }
        }
  out:
        put_group_info(current->group_info);
        return i;
}

asmlinkage long
sys32_setgroups16 (int gidsetsize, short __user *grouplist)
{
        struct group_info *group_info;
        int retval;

        if (!capable(CAP_SETGID))
                return -EPERM;
        if ((unsigned)gidsetsize > NGROUPS_MAX)
                return -EINVAL;

        group_info = groups_alloc(gidsetsize);
        if (!group_info)
                return -ENOMEM;
        retval = groups16_from_user(group_info, grouplist);
        if (retval) {
                put_group_info(group_info);
                return retval;
        }

        retval = set_current_groups(group_info);
        put_group_info(group_info);

        return retval;
}

asmlinkage long
sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
{
        return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo);
}

asmlinkage long
sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
{
        return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
}

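/*
 * Fill in a user-space struct stat64 from a kstat, splitting 64-bit fields
 * (device, inode, size) into the low/high halves the IA-32 layout expects.
 */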
static int
putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf)
{
        int err;
        u64 hdev;

        if (clear_user(ubuf, sizeof(*ubuf)))
                return -EFAULT;

        hdev = huge_encode_dev(kbuf->dev);
        err  = __put_user(hdev, (u32 __user*)&ubuf->st_dev);
        err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1);
        err |= __put_user(kbuf->ino, &ubuf->__st_ino);
        err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
        err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
        err |= __put_user(kbuf->mode, &ubuf->st_mode);
        err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
        err |= __put_user(kbuf->uid, &ubuf->st_uid);
        err |= __put_user(kbuf->gid, &ubuf->st_gid);
        hdev = huge_encode_dev(kbuf->rdev);
        err |= __put_user(hdev, (u32 __user*)&ubuf->st_rdev);
        err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1);
        err |= __put_user(kbuf->size, &ubuf->st_size_lo);
        err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
        err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
        err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
        err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
        err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
        err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
        err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
        err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
        err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
        return err;
}

asmlinkage long
sys32_stat64 (char __user *filename, struct stat64 __user *statbuf)
{
        struct kstat s;
        long ret = vfs_stat(filename, &s);
        if (!ret)
                ret = putstat64(statbuf, &s);
        return ret;
}

asmlinkage long
sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf)
{
        struct kstat s;
        long ret = vfs_lstat(filename, &s);
        if (!ret)
                ret = putstat64(statbuf, &s);
        return ret;
}

asmlinkage long
sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf)
{
        struct kstat s;
        long ret = vfs_fstat(fd, &s);
        if (!ret)
                ret = putstat64(statbuf, &s);
        return ret;
}

struct sysinfo32 {
        s32 uptime;
        u32 loads[3];
        u32 totalram;
        u32 freeram;
        u32 sharedram;
        u32 bufferram;
        u32 totalswap;
        u32 freeswap;
        u16 procs;
        u16 pad;
        u32 totalhigh;
        u32 freehigh;
        u32 mem_unit;
        char _f[8];
};

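/*
 * sysinfo() for IA-32: run the native call under KERNEL_DS, scale the memory
 * counters down via mem_unit if they overflow 32 bits, then copy the result
 * into the 32-bit struct sysinfo32.
 */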
asmlinkage long
sys32_sysinfo (struct sysinfo32 __user *info)
{
        struct sysinfo s;
        long ret, err;
        int bitcount = 0;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_sysinfo((struct sysinfo __user *) &s);
        set_fs(old_fs);
        /* Check to see if any memory value is too large for 32-bit and
         * scale down if needed.
         */
        if ((s.totalram >> 32) || (s.totalswap >> 32)) {
                while (s.mem_unit < PAGE_SIZE) {
                        s.mem_unit <<= 1;
                        bitcount++;
                }
                s.totalram >>= bitcount;
                s.freeram >>= bitcount;
                s.sharedram >>= bitcount;
                s.bufferram >>= bitcount;
                s.totalswap >>= bitcount;
                s.freeswap >>= bitcount;
                s.totalhigh >>= bitcount;
                s.freehigh >>= bitcount;
        }

        if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
                return -EFAULT;

        err  = __put_user(s.uptime, &info->uptime);
        err |= __put_user(s.loads[0], &info->loads[0]);
        err |= __put_user(s.loads[1], &info->loads[1]);
        err |= __put_user(s.loads[2], &info->loads[2]);
        err |= __put_user(s.totalram, &info->totalram);
        err |= __put_user(s.freeram, &info->freeram);
        err |= __put_user(s.sharedram, &info->sharedram);
        err |= __put_user(s.bufferram, &info->bufferram);
        err |= __put_user(s.totalswap, &info->totalswap);
        err |= __put_user(s.freeswap, &info->freeswap);
        err |= __put_user(s.procs, &info->procs);
        err |= __put_user(s.totalhigh, &info->totalhigh);
        err |= __put_user(s.freehigh, &info->freehigh);
        err |= __put_user(s.mem_unit, &info->mem_unit);
        if (err)
                return -EFAULT;
        return ret;
}

asmlinkage long
sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval)
{
        mm_segment_t old_fs = get_fs();
        struct timespec t;
        long ret;

        set_fs(KERNEL_DS);
        ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
        set_fs(old_fs);
        if (put_compat_timespec(&t, interval))
                return -EFAULT;
        return ret;
}

asmlinkage long
sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
        return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}

asmlinkage long
sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
        return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}

asmlinkage long
sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count)
{
        mm_segment_t old_fs = get_fs();
        long ret;
        off_t of;

        if (offset && get_user(of, offset))
                return -EFAULT;

        set_fs(KERNEL_DS);
        ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count);
        set_fs(old_fs);

        if (offset && put_user(of, offset))
                return -EFAULT;

        return ret;
}

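/*
 * Keep an IA-32 task that asks for PER_LINUX in the PER_LINUX32 personality,
 * and report PER_LINUX32 back to it as PER_LINUX.
 */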
asmlinkage long
sys32_personality (unsigned int personality)
{
        long ret;

        if (current->personality == PER_LINUX32 && personality == PER_LINUX)
                personality = PER_LINUX32;
        ret = sys_personality(personality);
        if (ret == PER_LINUX32)
                ret = PER_LINUX;
        return ret;
}

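/*
 * brk() for IA-32: when the break moves down, zero the tail of the page at
 * the new break (from the new break up to the next page boundary).
 */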
asmlinkage unsigned long
sys32_brk (unsigned int brk)
{
        unsigned long ret, obrk;
        struct mm_struct *mm = current->mm;

        obrk = mm->brk;
        ret = sys_brk(brk);
        if (ret < obrk)
                clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret);
        return ret;
}

/* Structure for ia32 emulation on ia64 */
struct epoll_event32
{
        u32 events;
        u32 data[2];
};

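/*
 * epoll with 32-bit epoll_event: the events word is unchanged, but the
 * 64-bit data field is split across data[0]/data[1], so each event is
 * repacked before and after calling the native sys_epoll_ctl()/
 * sys_epoll_wait().
 */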
asmlinkage long
sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event)
{
        mm_segment_t old_fs = get_fs();
        struct epoll_event event64;
        int error;
        u32 data_halfword;

        if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32)))
                return -EFAULT;

        __get_user(event64.events, &event->events);
        __get_user(data_halfword, &event->data[0]);
        event64.data = data_halfword;
        __get_user(data_halfword, &event->data[1]);
        event64.data |= (u64)data_halfword << 32;

        set_fs(KERNEL_DS);
        error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64);
        set_fs(old_fs);

        return error;
}

asmlinkage long
sys32_epoll_wait(int epfd, struct epoll_event32 __user *events, int maxevents,
                 int timeout)
{
        struct epoll_event *events64 = NULL;
        mm_segment_t old_fs = get_fs();
        int numevents, size;
        int evt_idx;
        int do_free_pages = 0;

        if (maxevents <= 0) {
                return -EINVAL;
        }

        /* Verify that the area passed by the user is writeable */
        if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32)))
                return -EFAULT;

        /*
         * Allocate space for the intermediate copy.  If the space needed
         * is large enough to cause kmalloc to fail, then try again with
         * __get_free_pages.
         */
        size = maxevents * sizeof(struct epoll_event);
        events64 = kmalloc(size, GFP_KERNEL);
        if (events64 == NULL) {
                events64 = (struct epoll_event *)
                        __get_free_pages(GFP_KERNEL, get_order(size));
                if (events64 == NULL)
                        return -ENOMEM;
                do_free_pages = 1;
        }

        /* Do the system call */
        set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem */
        numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64,
                                   maxevents, timeout);
        set_fs(old_fs);

        /* Don't modify userspace memory if we're returning an error */
        if (numevents > 0) {
                /* Translate the 64-bit structures back into the 32-bit
                   structures */
                for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
                        __put_user(events64[evt_idx].events,
                                   &events[evt_idx].events);
                        __put_user((u32)events64[evt_idx].data,
                                   &events[evt_idx].data[0]);
                        __put_user((u32)(events64[evt_idx].data >> 32),
                                   &events[evt_idx].data[1]);
                }
        }

        if (do_free_pages)
                free_pages((unsigned long) events64, get_order(size));
        else
                kfree(events64);
        return numevents;
}

/*
 * Get a yet unused TLS descriptor index.
 */
static int
get_free_idx (void)
{
        struct thread_struct *t = &current->thread;
        int idx;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (desc_empty(t->tls_array + idx))
                        return idx + GDT_ENTRY_TLS_MIN;
        return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int
sys32_set_thread_area (struct ia32_user_desc __user *u_info)
{
        struct thread_struct *t = &current->thread;
        struct ia32_user_desc info;
        struct desc_struct *desc;
        int cpu, idx;

        if (copy_from_user(&info, u_info, sizeof(info)))
                return -EFAULT;
        idx = info.entry_number;

        /*
         * index -1 means the kernel should try to find and allocate an empty descriptor:
         */
        if (idx == -1) {
                idx = get_free_idx();
                if (idx < 0)
                        return idx;
                if (put_user(idx, &u_info->entry_number))
                        return -EFAULT;
        }

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

        cpu = smp_processor_id();

        if (LDT_empty(&info)) {
                desc->a = 0;
                desc->b = 0;
        } else {
                desc->a = LDT_entry_a(&info);
                desc->b = LDT_entry_b(&info);
        }
        load_TLS(t, cpu);
        return 0;
}

/*
 * Get the current Thread-Local Storage area:
 */

#define GET_BASE(desc) (			\
        (((desc)->a >> 16) & 0x0000ffff) |	\
        (((desc)->b << 16) & 0x00ff0000) |	\
        ( (desc)->b        & 0xff000000)   )

#define GET_LIMIT(desc) (			\
        ((desc)->a & 0x0ffff) |			\
        ((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)

asmlinkage int
sys32_get_thread_area (struct ia32_user_desc __user *u_info)
{
        struct ia32_user_desc info;
        struct desc_struct *desc;
        int idx;

        if (get_user(idx, &u_info->entry_number))
                return -EFAULT;
        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

        info.entry_number = idx;
        info.base_addr = GET_BASE(desc);
        info.limit = GET_LIMIT(desc);
        info.seg_32bit = GET_32BIT(desc);
        info.contents = GET_CONTENTS(desc);
        info.read_exec_only = !GET_WRITABLE(desc);
        info.limit_in_pages = GET_LIMIT_PAGES(desc);
        info.seg_not_present = !GET_PRESENT(desc);
        info.useable = GET_USEABLE(desc);

        if (copy_to_user(u_info, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}

long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
                        __u32 len_low, __u32 len_high, int advice)
{
        return sys_fadvise64_64(fd,
                                (((u64)offset_high)<<32) | offset_low,
                                (((u64)len_high)<<32) | len_low,
                                advice);
}

#ifdef NOTYET	/* UNTESTED FOR IA64 FROM HERE DOWN */

asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
{
        uid_t sruid, seuid;

        sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
        seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
        return sys_setreuid(sruid, seuid);
}

asmlinkage long
sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
                compat_uid_t suid)
{
        uid_t sruid, seuid, ssuid;

        sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
        seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
        ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
        return sys_setresuid(sruid, seuid, ssuid);
}

asmlinkage long
sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
{
        gid_t srgid, segid;

        srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
        segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
        return sys_setregid(srgid, segid);
}

asmlinkage long
sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
                compat_gid_t sgid)
{
        gid_t srgid, segid, ssgid;

        srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
        segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
        ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
        return sys_setresgid(srgid, segid, ssgid);
}

/* Handle adjtimex compatibility. */

struct timex32 {
        u32 modes;
        s32 offset, freq, maxerror, esterror;
        s32 status, constant, precision, tolerance;
        struct compat_timeval time;
        s32 tick;
        s32 ppsfreq, jitter, shift, stabil;
        s32 jitcnt, calcnt, errcnt, stbcnt;
        s32  :32; s32  :32; s32  :32; s32  :32;
        s32  :32; s32  :32; s32  :32; s32  :32;
        s32  :32; s32  :32; s32  :32; s32  :32;
};

extern int do_adjtimex(struct timex *);

asmlinkage long
sys32_adjtimex(struct timex32 *utp)
{
        struct timex txc;
        int ret;

        memset(&txc, 0, sizeof(struct timex));

        if (get_user(txc.modes, &utp->modes) ||
            __get_user(txc.offset, &utp->offset) ||
            __get_user(txc.freq, &utp->freq) ||
            __get_user(txc.maxerror, &utp->maxerror) ||
            __get_user(txc.esterror, &utp->esterror) ||
            __get_user(txc.status, &utp->status) ||
            __get_user(txc.constant, &utp->constant) ||
            __get_user(txc.precision, &utp->precision) ||
            __get_user(txc.tolerance, &utp->tolerance) ||
            __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
            __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
            __get_user(txc.tick, &utp->tick) ||
            __get_user(txc.ppsfreq, &utp->ppsfreq) ||
            __get_user(txc.jitter, &utp->jitter) ||
            __get_user(txc.shift, &utp->shift) ||
            __get_user(txc.stabil, &utp->stabil) ||
            __get_user(txc.jitcnt, &utp->jitcnt) ||
            __get_user(txc.calcnt, &utp->calcnt) ||
            __get_user(txc.errcnt, &utp->errcnt) ||
            __get_user(txc.stbcnt, &utp->stbcnt))
                return -EFAULT;

        ret = do_adjtimex(&txc);

        if (put_user(txc.modes, &utp->modes) ||
            __put_user(txc.offset, &utp->offset) ||
            __put_user(txc.freq, &utp->freq) ||
            __put_user(txc.maxerror, &utp->maxerror) ||
            __put_user(txc.esterror, &utp->esterror) ||
            __put_user(txc.status, &utp->status) ||
            __put_user(txc.constant, &utp->constant) ||
            __put_user(txc.precision, &utp->precision) ||
            __put_user(txc.tolerance, &utp->tolerance) ||
            __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
            __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
            __put_user(txc.tick, &utp->tick) ||
            __put_user(txc.ppsfreq, &utp->ppsfreq) ||
            __put_user(txc.jitter, &utp->jitter) ||
            __put_user(txc.shift, &utp->shift) ||
            __put_user(txc.stabil, &utp->stabil) ||
            __put_user(txc.jitcnt, &utp->jitcnt) ||
            __put_user(txc.calcnt, &utp->calcnt) ||
            __put_user(txc.errcnt, &utp->errcnt) ||
            __put_user(txc.stbcnt, &utp->stbcnt))
                ret = -EFAULT;

        return ret;
}

#endif /* NOTYET */