/*
 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
 *
 * Copyright (C) 2000		VA Linux Co
 * Copyright (C) 2000		Don Dugger <n0ano@valinux.com>
 * Copyright (C) 1999		Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 1997,1998	Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997		David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2004		Gordon Jin <gordon.jin@intel.com>
 *
 * These routines maintain argument size conversion between 32bit and 64bit
 * environment.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/nfs_fs.h>
#include <linux/quota.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
#include <linux/poll.h>
#include <linux/eventpoll.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/stat.h>
#include <linux/ipc.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/mman.h>

#include <asm/intrinsics.h>
#include <asm/semaphore.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#include "ia32priv.h"

#include <net/scm.h>
#include <net/sock.h>

#define DEBUG	0

#if DEBUG
# define DBG(fmt...)	printk(KERN_DEBUG fmt)
#else
# define DBG(fmt...)
#endif

#define ROUND_UP(x,a)	((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))

#define OFFSET4K(a)		((a) & 0xfff)
#define PAGE_START(addr)	((addr) & PAGE_MASK)
#define MINSIGSTKSZ_IA32	2048

#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))
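
/*
 * For illustration, a few sample evaluations of the helpers above, assuming
 * 16KB IA64 pages (PAGE_MASK == ~0x3fffUL):
 *
 *	ROUND_UP(0x1001, 4)	== 0x1004	round up to a multiple of 4
 *	OFFSET4K(0x12345)	== 0x345	offset within a 4KB IA32 page
 *	PAGE_START(0x12345)	== 0x10000	start of enclosing IA64 page
 *	high2lowuid(100000)	== 65534	clamp to the 16-bit overflow uid
 */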
/*
 * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore
 * while doing so.
 */
/* XXX make per-mm: */
static DECLARE_MUTEX(ia32_mmap_sem);

asmlinkage long
sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
	      struct pt_regs *regs)
{
	long error;
	char *filename;
	unsigned long old_map_base, old_task_size, tssd;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;

	old_map_base  = current->thread.map_base;
	old_task_size = current->thread.task_size;
	tssd = ia64_get_kr(IA64_KR_TSSD);

	/* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
	current->thread.map_base  = DEFAULT_MAP_BASE;
	current->thread.task_size = DEFAULT_TASK_SIZE;
	ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
	ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);

	error = compat_do_execve(filename, argv, envp, regs);
	putname(filename);

	if (error < 0) {
		/* oops, execve failed, switch back to old values... */
		ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
		ia64_set_kr(IA64_KR_TSSD, tssd);
		current->thread.map_base  = old_map_base;
		current->thread.task_size = old_task_size;
	}

	return error;
}

int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	int err;

	if ((u64) stat->size > MAX_NON_LFS ||
	    !old_valid_dev(stat->dev) ||
	    !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	if (clear_user(ubuf, sizeof(*ubuf)))
		return -EFAULT;

	err  = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev);
	err |= __put_user(stat->ino, &ubuf->st_ino);
	err |= __put_user(stat->mode, &ubuf->st_mode);
	err |= __put_user(stat->nlink, &ubuf->st_nlink);
	err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid);
	err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
	err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev);
	err |= __put_user(stat->size, &ubuf->st_size);
	err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
	err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
	err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
	err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
	err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
	err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
	err |= __put_user(stat->blksize, &ubuf->st_blksize);
	err |= __put_user(stat->blocks, &ubuf->st_blocks);
	return err;
}
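
/*
 * Note on the -EOVERFLOW check above: the old 32-bit stat buffer has only a
 * 32-bit st_size and 16-bit device encodings, so a file bigger than
 * MAX_NON_LFS (2^31 - 1 bytes) or a device number that doesn't fit the old
 * encoding cannot be represented; such callers must use the stat64/LFS
 * interfaces instead.
 */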
#if PAGE_SHIFT > IA32_PAGE_SHIFT

static int
get_page_prot (struct vm_area_struct *vma, unsigned long addr)
{
	int prot = 0;

	if (!vma || vma->vm_start > addr)
		return 0;

	if (vma->vm_flags & VM_READ)
		prot |= PROT_READ;
	if (vma->vm_flags & VM_WRITE)
		prot |= PROT_WRITE;
	if (vma->vm_flags & VM_EXEC)
		prot |= PROT_EXEC;
	return prot;
}

/*
 * Map a subpage by creating an anonymous page that contains the union of the old page and
 * the subpage.
 */
static unsigned long
mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
	      loff_t off)
{
	void *page = NULL;
	struct inode *inode;
	unsigned long ret = 0;
	struct vm_area_struct *vma = find_vma(current->mm, start);
	int old_prot = get_page_prot(vma, start);

	DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
	    file, start, end, prot, flags, off);

	/* Optimize the case where the old mmap and the new mmap are both anonymous */
	if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
		if (clear_user((void __user *) start, end - start)) {
			ret = -EFAULT;
			goto out;
		}
		goto skip_mmap;
	}

	page = (void *) get_zeroed_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (old_prot)
		copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE);

	down_write(&current->mm->mmap_sem);
	{
		ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
			      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
	}
	up_write(&current->mm->mmap_sem);

	if (IS_ERR((void *) ret))
		goto out;

	if (old_prot) {
		/* copy back the old page contents. */
		if (offset_in_page(start))
			copy_to_user((void __user *) PAGE_START(start), page,
				     offset_in_page(start));
		if (offset_in_page(end))
			copy_to_user((void __user *) end, page + offset_in_page(end),
				     PAGE_SIZE - offset_in_page(end));
	}

	if (!(flags & MAP_ANONYMOUS)) {
		/* read the file contents */
		inode = file->f_dentry->d_inode;
		if (!inode->i_fop || !file->f_op->read
		    || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
		{
			ret = -EINVAL;
			goto out;
		}
	}

 skip_mmap:
	if (!(prot & PROT_WRITE))
		ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
 out:
	if (page)
		free_page((unsigned long) page);
	return ret;
}
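
/*
 * Illustration of the "union" above, assuming 16KB IA64 pages and a new 4KB
 * mapping at [0x11000,0x12000) inside the IA64 page [0x10000,0x14000):
 *
 *	0x10000           0x11000   0x12000             0x14000
 *	|--- old contents ----|-- new ---|--- old contents ----|
 *
 * The whole IA64 page is remapped anonymously, the preserved head and tail
 * are copied back from the temporary page, and the new subpage is filled
 * from the file (or left zeroed for MAP_ANONYMOUS).
 */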
/* SLAB cache for partial_page structures */
kmem_cache_t *partial_page_cachep;

/*
 * init partial_page_list.
 * Returns NULL if the kmalloc fails.
 */
struct partial_page_list*
ia32_init_pp_list(void)
{
	struct partial_page_list *p;

	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
		return p;
	p->pp_head = NULL;
	p->ppl_rb = RB_ROOT;
	p->pp_hint = NULL;
	atomic_set(&p->pp_count, 1);
	return p;
}

/*
 * Search for the partial page with @start in partial page list @ppl.
 * If it finds the partial page, return the found partial page.
 * Else, return NULL and provide @pprev, @rb_link, @rb_parent to
 * be used by a later __ia32_insert_pp().
 */
static struct partial_page *
__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
	       struct partial_page **pprev, struct rb_node ***rb_link,
	       struct rb_node **rb_parent)
{
	struct partial_page *pp;
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	pp = ppl->pp_hint;
	if (pp && pp->base == start)
		return pp;

	__rb_link = &ppl->ppl_rb.rb_node;
	rb_prev = __rb_parent = NULL;

	while (*__rb_link) {
		__rb_parent = *__rb_link;
		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);

		if (pp->base == start) {
			ppl->pp_hint = pp;
			return pp;
		} else if (pp->base < start) {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		} else {
			__rb_link = &__rb_parent->rb_left;
		}
	}

	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
	return NULL;
}
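
/*
 * Note the double duty of the miss path above: a failed lookup leaves
 * @pprev, @rb_link and @rb_parent pointing at the exact link where
 * __ia32_insert_pp() can attach a new node, so set/unset operations walk
 * the tree only once.  pp_hint caches the most recent hit, short-cutting
 * the common case of repeated operations on the same IA64 page.
 */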
/*
 * insert @pp into @ppl.
 */
static void
__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
		 struct partial_page *prev, struct rb_node **rb_link,
		 struct rb_node *rb_parent)
{
	/* link list */
	if (prev) {
		pp->next = prev->next;
		prev->next = pp;
	} else {
		ppl->pp_head = pp;
		if (rb_parent)
			pp->next = rb_entry(rb_parent,
					    struct partial_page, pp_rb);
		else
			pp->next = NULL;
	}

	/* link rb */
	rb_link_node(&pp->pp_rb, rb_parent, rb_link);
	rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);

	ppl->pp_hint = pp;
}

/*
 * delete @pp from partial page list @ppl.
 */
static void
__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
		 struct partial_page *prev)
{
	if (prev) {
		prev->next = pp->next;
		if (ppl->pp_hint == pp)
			ppl->pp_hint = prev;
	} else {
		ppl->pp_head = pp->next;
		if (ppl->pp_hint == pp)
			ppl->pp_hint = pp->next;
	}
	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
	kmem_cache_free(partial_page_cachep, pp);
}

static struct partial_page *
__pp_prev(struct partial_page *pp)
{
	struct rb_node *prev = rb_prev(&pp->pp_rb);
	if (prev)
		return rb_entry(prev, struct partial_page, pp_rb);
	else
		return NULL;
}

/*
 * Delete partial pages with address between @start and @end.
 * @start and @end are page aligned.
 */
static void
__ia32_delete_pp_range(unsigned int start, unsigned int end)
{
	struct partial_page *pp, *prev;
	struct rb_node **rb_link, *rb_parent;

	if (start >= end)
		return;

	pp = __ia32_find_pp(current->thread.ppl, start, &prev,
			    &rb_link, &rb_parent);
	if (pp)
		prev = __pp_prev(pp);
	else {
		if (prev)
			pp = prev->next;
		else
			pp = current->thread.ppl->pp_head;
	}

	while (pp && pp->base < end) {
		struct partial_page *tmp = pp->next;
		__ia32_delete_pp(current->thread.ppl, pp, prev);
		pp = tmp;
	}
}

/*
 * Set the range between @start and @end in bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 */
static int
__ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
	struct partial_page *pp, *prev;
	struct rb_node **rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, i;

	pstart = PAGE_START(start);
	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
	if (end_bit == 0)
		end_bit = PAGE_SIZE / IA32_PAGE_SIZE;

	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
			    &rb_link, &rb_parent);
	if (pp) {
		for (i = start_bit; i < end_bit; i++)
			set_bit(i, &pp->bitmap);
		/*
		 * Check: if this partial page has been set to a full page,
		 * then delete it.
		 */
		if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
		    PAGE_SIZE/IA32_PAGE_SIZE) {
			__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
		}
		return 0;
	}

	/*
	 * MAP_FIXED may lead to overlapping mmap.
	 * In this case, the requested mmap area may already be mapped as a full
	 * page.  So check the vma before adding a new partial page.
	 */
	if (flags & MAP_FIXED) {
		struct vm_area_struct *vma = find_vma(current->mm, pstart);
		if (vma && vma->vm_start <= pstart)
			return 0;
	}

	/* allocate a new partial_page */
	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->base = pstart;
	pp->bitmap = 0;
	for (i = start_bit; i < end_bit; i++)
		set_bit(i, &(pp->bitmap));
	pp->next = NULL;
	__ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
	return 0;
}
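
/*
 * Worked example, assuming 16KB IA64 pages and 4KB IA32 pages (so each
 * partial page tracks PAGE_SIZE/IA32_PAGE_SIZE == 4 bits):
 *
 *	__ia32_set_pp(0x2000, 0x4000, 0)
 *		pstart    = 0x0
 *		start_bit = 0x2000 / 0x1000 = 2
 *		end_bit   = 0 -> 4 (end falls on the IA64 page boundary)
 *		bitmap    = 0b1100 after setting bits 2 and 3
 *
 * A later __ia32_set_pp(0x0, 0x2000, 0) would set bits 0 and 1, making the
 * bitmap all ones, at which point the entry is deleted because the IA64
 * page is now fully mapped.
 */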
/*
 * @start and @end should be IA32 page aligned, but don't need to be in the
 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 * page, then call __ia32_set_pp().
 */
static void
ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
	down_write(&current->mm->mmap_sem);

	if (flags & MAP_FIXED) {
		/*
		 * MAP_FIXED may lead to overlapping mmap. When this happens,
		 * a series of complete IA64 pages results in deletion of
		 * old partial pages in that range.
		 */
		__ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
	}

	if (end < PAGE_ALIGN(start)) {
		__ia32_set_pp(start, end, flags);
	} else {
		if (offset_in_page(start))
			__ia32_set_pp(start, PAGE_ALIGN(start), flags);
		if (offset_in_page(end))
			__ia32_set_pp(PAGE_START(end), end, flags);
	}

	up_write(&current->mm->mmap_sem);
}
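
/*
 * Splitting example, again assuming 16KB IA64 pages: for the IA32-aligned
 * range [0x3000, 0x9000):
 *
 *	offset_in_page(0x3000) != 0 -> __ia32_set_pp(0x3000, 0x4000)
 *	[0x4000, 0x8000) are complete IA64 pages and need no tracking
 *	offset_in_page(0x9000) != 0 -> __ia32_set_pp(0x8000, 0x9000)
 *
 * When the whole range fits inside one IA64 page (end < PAGE_ALIGN(start)),
 * a single __ia32_set_pp() call suffices.
 */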
/*
 * Unset the range between @start and @end in bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 * Afterwards, if the bitmap is all zeroes, free the partial page and return 1;
 * otherwise return 0.
 * If the partial page is not found in the list:
 *	if a vma covers the page, the full page is converted to a partial page;
 *	else return -ENOMEM.
 */
static int
__ia32_unset_pp(unsigned int start, unsigned int end)
{
	struct partial_page *pp, *prev;
	struct rb_node **rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, i;
	struct vm_area_struct *vma;

	pstart = PAGE_START(start);
	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
	if (end_bit == 0)
		end_bit = PAGE_SIZE / IA32_PAGE_SIZE;

	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
			    &rb_link, &rb_parent);
	if (pp) {
		for (i = start_bit; i < end_bit; i++)
			clear_bit(i, &pp->bitmap);
		if (pp->bitmap == 0) {
			__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
			return 1;
		}
		return 0;
	}

	vma = find_vma(current->mm, pstart);
	if (!vma || vma->vm_start > pstart) {
		return -ENOMEM;
	}

	/* allocate a new partial_page */
	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->base = pstart;
	pp->bitmap = 0;
	for (i = 0; i < start_bit; i++)
		set_bit(i, &(pp->bitmap));
	for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++)
		set_bit(i, &(pp->bitmap));
	pp->next = NULL;
	__ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
	return 0;
}

/*
 * Delete pp between PAGE_ALIGN(start) and PAGE_START(end) by calling
 * __ia32_delete_pp_range().  Unset possible partial pages by calling
 * __ia32_unset_pp().
 * For the return value, see __ia32_unset_pp().
 */
static int
ia32_unset_pp(unsigned int *startp, unsigned int *endp)
{
	unsigned int start = *startp, end = *endp;
	int ret = 0;

	down_write(&current->mm->mmap_sem);

	__ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));

	if (end < PAGE_ALIGN(start)) {
		ret = __ia32_unset_pp(start, end);
		if (ret == 1) {
			*startp = PAGE_START(start);
			*endp = PAGE_ALIGN(end);
		}
		if (ret == 0) {
			/* to shortcut sys_munmap() in sys32_munmap() */
			*startp = PAGE_START(start);
			*endp = PAGE_START(end);
		}
	} else {
		if (offset_in_page(start)) {
			ret = __ia32_unset_pp(start, PAGE_ALIGN(start));
			if (ret == 1)
				*startp = PAGE_START(start);
			if (ret == 0)
				*startp = PAGE_ALIGN(start);
			if (ret < 0)
				goto out;
		}
		if (offset_in_page(end)) {
			ret = __ia32_unset_pp(PAGE_START(end), end);
			if (ret == 1)
				*endp = PAGE_ALIGN(end);
			if (ret == 0)
				*endp = PAGE_START(end);
		}
	}

 out:
	up_write(&current->mm->mmap_sem);
	return ret;
}
/*
 * Compare the range between @start and @end with bitmap in partial page.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 */
static int
__ia32_compare_pp(unsigned int start, unsigned int end)
{
	struct partial_page *pp, *prev;
	struct rb_node **rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, size;
	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */

	pstart = PAGE_START(start);

	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
			    &rb_link, &rb_parent);
	if (!pp)
		return 1;

	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
	size = sizeof(pp->bitmap) * 8;
	first_bit = find_first_bit(&pp->bitmap, size);
	next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit);
	if ((start_bit < first_bit) || (end_bit > next_zero_bit)) {
		/* exceeds the first range in bitmap */
		return -ENOMEM;
	} else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) {
		first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit);
		if ((next_zero_bit < first_bit) && (first_bit < size))
			return 1;	/* has next range */
		else
			return 0;	/* no next range */
	} else
		return 1;
}

/*
 * @start and @end should be IA32 page aligned, but don't need to be in the
 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 * page, then call __ia32_compare_pp().
 *
 * Take this as example: the range is the 1st and 2nd 4K page.
 * Return 0 if they fit bitmap exactly, i.e. bitmap = 00000011;
 * Return 1 if the range doesn't cover whole bitmap, e.g. bitmap = 00001111;
 * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or
 * bitmap = 00000101.
 */
static int
ia32_compare_pp(unsigned int *startp, unsigned int *endp)
{
	unsigned int start = *startp, end = *endp;
	int retval = 0;

	down_write(&current->mm->mmap_sem);

	if (end < PAGE_ALIGN(start)) {
		retval = __ia32_compare_pp(start, end);
		if (retval == 0) {
			*startp = PAGE_START(start);
			*endp = PAGE_ALIGN(end);
		}
	} else {
		if (offset_in_page(start)) {
			retval = __ia32_compare_pp(start,
						   PAGE_ALIGN(start));
			if (retval == 0)
				*startp = PAGE_START(start);
			if (retval < 0)
				goto out;
		}
		if (offset_in_page(end)) {
			retval = __ia32_compare_pp(PAGE_START(end), end);
			if (retval == 0)
				*endp = PAGE_ALIGN(end);
		}
	}

 out:
	up_write(&current->mm->mmap_sem);
	return retval;
}
static void
__ia32_drop_pp_list(struct partial_page_list *ppl)
{
	struct partial_page *pp = ppl->pp_head;

	while (pp) {
		struct partial_page *next = pp->next;
		kmem_cache_free(partial_page_cachep, pp);
		pp = next;
	}

	kfree(ppl);
}

void
ia32_drop_partial_page_list(struct task_struct *task)
{
	struct partial_page_list* ppl = task->thread.ppl;

	if (ppl && atomic_dec_and_test(&ppl->pp_count))
		__ia32_drop_pp_list(ppl);
}

/*
 * Copy current->thread.ppl to ppl (already initialized).
 */
static int
__ia32_copy_pp_list(struct partial_page_list *ppl)
{
	struct partial_page *pp, *tmp, *prev;
	struct rb_node **rb_link, *rb_parent;

	ppl->pp_head = NULL;
	ppl->pp_hint = NULL;
	ppl->ppl_rb = RB_ROOT;
	rb_link = &ppl->ppl_rb.rb_node;
	rb_parent = NULL;
	prev = NULL;

	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
		tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		*tmp = *pp;
		__ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent);
		prev = tmp;
		rb_link = &tmp->pp_rb.rb_right;
		rb_parent = &tmp->pp_rb;
	}
	return 0;
}

int
ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
{
	int retval = 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&current->thread.ppl->pp_count);
		p->thread.ppl = current->thread.ppl;
	} else {
		p->thread.ppl = ia32_init_pp_list();
		if (!p->thread.ppl)
			return -ENOMEM;
		down_write(&current->mm->mmap_sem);
		{
			retval = __ia32_copy_pp_list(p->thread.ppl);
		}
		up_write(&current->mm->mmap_sem);
	}

	return retval;
}

static unsigned long
emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
	      loff_t off)
{
	unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
	struct inode *inode;
	loff_t poff;

	end = start + len;
	pstart = PAGE_START(start);
	pend = PAGE_ALIGN(end);

	if (flags & MAP_FIXED) {
		ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
		if (start > pstart) {
			if (flags & MAP_SHARED)
				printk(KERN_INFO
				       "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
				       current->comm, current->pid, start);
			ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
					   off);
			if (IS_ERR((void *) ret))
				return ret;
			pstart += PAGE_SIZE;
			if (pstart >= pend)
				goto out;	/* done */
		}
		if (end < pend) {
			if (flags & MAP_SHARED)
				printk(KERN_INFO
				       "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
				       current->comm, current->pid, end);
			ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
					   (off + len) - offset_in_page(end));
			if (IS_ERR((void *) ret))
				return ret;
			pend -= PAGE_SIZE;
			if (pstart >= pend)
				goto out;	/* done */
		}
	} else {
		/*
		 * If a start address was specified, use it if the entire rounded out area
		 * is available.
		 */
		if (start && !pstart)
			fudge = 1;	/* handle case of mapping to range (0,PAGE_SIZE) */
		tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
		if (tmp != pstart) {
			pstart = tmp;
			start = pstart + offset_in_page(off);	/* make start congruent with off */
			end = start + len;
			pend = PAGE_ALIGN(end);
		}
	}

	poff = off + (pstart - start);	/* note: (pstart - start) may be negative */
	is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);

	if ((flags & MAP_SHARED) && !is_congruent)
		printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
		       "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off);

	DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
	    is_congruent ? "congruent" : "not congruent", poff);

	down_write(&current->mm->mmap_sem);
	{
		if (!(flags & MAP_ANONYMOUS) && is_congruent)
			ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
		else
			ret = do_mmap(NULL, pstart, pend - pstart,
				      prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
				      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
	}
	up_write(&current->mm->mmap_sem);

	if (IS_ERR((void *) ret))
		return ret;

	if (!is_congruent) {
		/* read the file contents */
		inode = file->f_dentry->d_inode;
		if (!inode->i_fop || !file->f_op->read
		    || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
			< 0))
		{
			sys_munmap(pstart, pend - pstart);
			return -EINVAL;
		}
		if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
			return -EINVAL;
	}

	if (!(flags & MAP_FIXED))
		ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
 out:
	return start;
}

#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
static inline unsigned int
get_prot32 (unsigned int prot)
{
	if (prot & PROT_WRITE)
		/* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
		prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
	else if (prot & (PROT_READ | PROT_EXEC))
		/* on x86, there is no distinction between PROT_READ and PROT_EXEC */
		prot |= (PROT_READ | PROT_EXEC);

	return prot;
}
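
/*
 * Example of the widening above:
 *
 *	get_prot32(PROT_WRITE)	== PROT_READ | PROT_WRITE | PROT_EXEC
 *	get_prot32(PROT_READ)	== PROT_READ | PROT_EXEC
 *	get_prot32(PROT_NONE)	== PROT_NONE
 *
 * Legacy IA-32 page tables have no execute-disable bit, so readable pages
 * are always executable and writable pages are always readable; the
 * emulation grants the same superset an x86 process would have natively.
 */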
unsigned long
ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
	      loff_t offset)
{
	DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
	    file, addr, len, prot, flags, offset);

	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	len = IA32_PAGE_ALIGN(len);
	if (len == 0)
		return addr;

	if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
	{
		if (flags & MAP_FIXED)
			return -ENOMEM;
		else
			return -EINVAL;
	}

	if (OFFSET4K(offset))
		return -EINVAL;

	prot = get_prot32(prot);

#if PAGE_SHIFT > IA32_PAGE_SHIFT
	down(&ia32_mmap_sem);
	{
		addr = emulate_mmap(file, addr, len, prot, flags, offset);
	}
	up(&ia32_mmap_sem);
#else
	down_write(&current->mm->mmap_sem);
	{
		addr = do_mmap(file, addr, len, prot, flags, offset);
	}
	up_write(&current->mm->mmap_sem);
#endif
	DBG("ia32_do_mmap: returning 0x%lx\n", addr);
	return addr;
}

/*
 * Linux/i386 didn't use to be able to handle more than 4 system call parameters, so these
 * system calls used a memory block for parameter passing.
 */

struct mmap_arg_struct {
	unsigned int addr;
	unsigned int len;
	unsigned int prot;
	unsigned int flags;
	unsigned int fd;
	unsigned int offset;
};
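
/*
 * For illustration only (a hypothetical 32-bit userspace sketch, not part
 * of the kernel): the old mmap entry point receives a single pointer to
 * the whole parameter block,
 *
 *	struct mmap_arg_struct a = {
 *		.addr = 0, .len = 8192,
 *		.prot = PROT_READ | PROT_WRITE,
 *		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
 *		.fd = -1, .offset = 0,
 *	};
 *	result = syscall(90, &a);	(90 == __NR_mmap on i386)
 *
 * which sys32_mmap() below copies in with copy_from_user().
 */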
asmlinkage long
sys32_mmap (struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	struct file *file = NULL;
	unsigned long addr;
	int flags;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if (OFFSET4K(a.offset))
		return -EINVAL;

	flags = a.flags;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(a.fd);
		if (!file)
			return -EBADF;
	}

	addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);

	if (file)
		fput(file);
	return addr;
}

asmlinkage long
sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
	     unsigned int fd, unsigned int pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			return -EBADF;
	}

	retval = ia32_do_mmap(file, addr, len, prot, flags,
			      (unsigned long) pgoff << IA32_PAGE_SHIFT);

	if (file)
		fput(file);
	return retval;
}

asmlinkage long
sys32_munmap (unsigned int start, unsigned int len)
{
	unsigned int end = start + len;
	long ret;

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
	ret = sys_munmap(start, end - start);
#else
	if (OFFSET4K(start))
		return -EINVAL;

	end = IA32_PAGE_ALIGN(end);
	if (start >= end)
		return -EINVAL;

	ret = ia32_unset_pp(&start, &end);
	if (ret < 0)
		return ret;

	if (start >= end)
		return 0;

	down(&ia32_mmap_sem);
	{
		ret = sys_munmap(start, end - start);
	}
	up(&ia32_mmap_sem);
#endif
	return ret;
}

#if PAGE_SHIFT > IA32_PAGE_SHIFT

/*
 * When mprotect()ing a partial page, we set the permission to the union of the old
 * settings and the new settings.  In other words, it's only possible to make access to a
 * partial page less restrictive.
 */
static long
mprotect_subpage (unsigned long address, int new_prot)
{
	int old_prot;
	struct vm_area_struct *vma;

	if (new_prot == PROT_NONE)
		return 0;		/* optimize case where nothing changes... */
	vma = find_vma(current->mm, address);
	old_prot = get_page_prot(vma, address);
	return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
}
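
/*
 * Example of the union semantics above, assuming a 16KB IA64 page whose
 * existing vma is PROT_READ|PROT_WRITE: mprotect_subpage(page, PROT_READ)
 * issues sys_mprotect(page, PAGE_SIZE, PROT_READ|PROT_WRITE), leaving the
 * protection unchanged rather than tightened.  Restricting only part of an
 * IA64 page is impossible, since the hardware protects whole pages.
 */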
#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */

asmlinkage long
sys32_mprotect (unsigned int start, unsigned int len, int prot)
{
	unsigned int end = start + len;
#if PAGE_SHIFT > IA32_PAGE_SHIFT
	long retval = 0;
#endif

	prot = get_prot32(prot);

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
	return sys_mprotect(start, end - start, prot);
#else
	if (OFFSET4K(start))
		return -EINVAL;

	end = IA32_PAGE_ALIGN(end);
	if (end < start)
		return -EINVAL;

	retval = ia32_compare_pp(&start, &end);
	if (retval < 0)
		return retval;

	down(&ia32_mmap_sem);
	{
		if (offset_in_page(start)) {
			/* start address is 4KB aligned but not page aligned. */
			retval = mprotect_subpage(PAGE_START(start), prot);
			if (retval < 0)
				goto out;

			start = PAGE_ALIGN(start);
			if (start >= end)
				goto out;	/* retval is already zero... */
		}

		if (offset_in_page(end)) {
			/* end address is 4KB aligned but not page aligned. */
			retval = mprotect_subpage(PAGE_START(end), prot);
			if (retval < 0)
				goto out;

			end = PAGE_START(end);
		}
		retval = sys_mprotect(start, end - start, prot);
	}
 out:
	up(&ia32_mmap_sem);
	return retval;
#endif
}

asmlinkage long
sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
	      unsigned int flags, unsigned int new_addr)
{
	long ret;

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
	ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
#else
	unsigned int old_end, new_end;

	if (OFFSET4K(addr))
		return -EINVAL;

	old_len = IA32_PAGE_ALIGN(old_len);
	new_len = IA32_PAGE_ALIGN(new_len);
	old_end = addr + old_len;
	new_end = addr + new_len;

	if (!new_len)
		return -EINVAL;

	if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr)))
		return -EINVAL;

	if (old_len >= new_len) {
		ret = sys32_munmap(addr + new_len, old_len - new_len);
		if (ret && old_len != new_len)
			return ret;
		ret = addr;
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			return ret;
		old_len = new_len;
	}

	addr = PAGE_START(addr);
	old_len = PAGE_ALIGN(old_end) - addr;
	new_len = PAGE_ALIGN(new_end) - addr;

	down(&ia32_mmap_sem);
	{
		ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
	}
	up(&ia32_mmap_sem);

	if ((ret >= 0) && (old_len < new_len)) {
		/* mremap expanded successfully */
		ia32_set_pp(old_end, new_end, flags);
	}
#endif
	return ret;
}

asmlinkage long
sys32_pipe (int __user *fd)
{
	int retval;
	int fds[2];

	retval = do_pipe(fds);
	if (retval)
		goto out;
	if (copy_to_user(fd, fds, sizeof(fds)))
		retval = -EFAULT;
 out:
	return retval;
}

static inline long
get_tv32 (struct timeval *o, struct compat_timeval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec)));
}
static inline long
put_tv32 (struct compat_timeval __user *o, struct timeval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec)));
}

asmlinkage unsigned long
sys32_alarm (unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	do_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	/* ehhh.. We can't return 0 if we have an alarm pending.. */
	/* And we'd better return too much than too little anyway */
	if (it_old.it_value.tv_usec)
		oldalarm++;
	return oldalarm;
}

/* Translations due to time_t size differences.  Which affects all
   sorts of things, like timeval and itimerval.  */

extern struct timezone sys_tz;

asmlinkage long
sys32_gettimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
{
	if (tv) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (put_tv32(tv, &ktv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}
	return 0;
}

asmlinkage long
sys32_settimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
{
	struct timeval ktv;
	struct timespec kts;
	struct timezone ktz;

	if (tv) {
		if (get_tv32(&ktv, tv))
			return -EFAULT;
		kts.tv_sec = ktv.tv_sec;
		kts.tv_nsec = ktv.tv_usec * 1000;
	}
	if (tz) {
		if (copy_from_user(&ktz, tz, sizeof(ktz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}

struct getdents32_callback {
	struct compat_dirent __user *current_dir;
	struct compat_dirent __user *previous;
	int count;
	int error;
};

struct readdir32_callback {
	struct old_linux32_dirent __user * dirent;
	int count;
};

static int
filldir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino,
	   unsigned int d_type)
{
	struct compat_dirent __user * dirent;
	struct getdents32_callback * buf = (struct getdents32_callback *) __buf;
	int reclen = ROUND_UP(offsetof(struct compat_dirent, d_name) + namlen + 1, 4);

	buf->error = -EINVAL;	/* only used if we fail.. */
	if (reclen > buf->count)
		return -EINVAL;
	buf->error = -EFAULT;	/* only used if we fail.. */
	dirent = buf->previous;
	if (dirent)
		if (put_user(offset, &dirent->d_off))
			return -EFAULT;
	dirent = buf->current_dir;
	buf->previous = dirent;
	if (put_user(ino, &dirent->d_ino)
	    || put_user(reclen, &dirent->d_reclen)
	    || copy_to_user(dirent->d_name, name, namlen)
	    || put_user(0, dirent->d_name + namlen))
		return -EFAULT;
	dirent = (struct compat_dirent __user *) ((char __user *) dirent + reclen);
	buf->current_dir = dirent;
	buf->count -= reclen;
	return 0;
}

asmlinkage long
sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned int count)
{
	struct file * file;
	struct compat_dirent __user * lastdirent;
	struct getdents32_callback buf;
	int error;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.current_dir = dirent;
	buf.previous = NULL;
	buf.count = count;
	buf.error = 0;

	error = vfs_readdir(file, filldir32, &buf);
	if (error < 0)
		goto out_putf;
	error = buf.error;
	lastdirent = buf.previous;
	if (lastdirent) {
		error = -EINVAL;
		if (put_user(file->f_pos, &lastdirent->d_off))
			goto out_putf;
		error = count - buf.count;
	}

out_putf:
	fput(file);
out:
	return error;
}

static int
fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
	      unsigned int d_type)
{
	struct readdir32_callback * buf = (struct readdir32_callback *) __buf;
	struct old_linux32_dirent __user * dirent;

	if (buf->count)
		return -EINVAL;
	buf->count++;
	dirent = buf->dirent;
	if (put_user(ino, &dirent->d_ino)
	    || put_user(offset, &dirent->d_offset)
	    || put_user(namlen, &dirent->d_namlen)
	    || copy_to_user(dirent->d_name, name, namlen)
	    || put_user(0, dirent->d_name + namlen))
		return -EFAULT;
	return 0;
}

asmlinkage long
sys32_readdir (unsigned int fd, void __user *dirent, unsigned int count)
{
	int error;
	struct file * file;
	struct readdir32_callback buf;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.count = 0;
	buf.dirent = dirent;

	error = vfs_readdir(file, fillonedir32, &buf);
	if (error >= 0)
		error = buf.count;
	fput(file);
out:
	return error;
}

struct sel_arg_struct {
	unsigned int n;
	unsigned int inp;
	unsigned int outp;
	unsigned int exp;
	unsigned int tvp;
};

asmlinkage long
sys32_old_select (struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
				 compat_ptr(a.exp), compat_ptr(a.tvp));
}

#define SEMOP		 1
#define SEMGET		 2
#define SEMCTL		 3
#define SEMTIMEDOP	 4
#define MSGSND		11
#define MSGRCV		12
#define MSGGET		13
#define MSGCTL		14
#define SHMAT		21
#define SHMDT		22
#define SHMGET		23
#define SHMCTL		24

asmlinkage long
sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
	int version;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMTIMEDOP:
		if (fifth)
			return compat_sys_semtimedop(first, compat_ptr(ptr),
						     second, compat_ptr(fifth));
		/* else fall through for normal semop() */
	case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		return sys_semtimedop(first, compat_ptr(ptr), second,
				      NULL);
	case SEMGET:
		return sys_semget(first, second, third);
	case SEMCTL:
		return compat_sys_semctl(first, second, third, compat_ptr(ptr));

	case MSGSND:
		return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
	case MSGRCV:
		return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr));
	case MSGGET:
		return sys_msgget((key_t) first, second);
	case MSGCTL:
		return compat_sys_msgctl(first, second, compat_ptr(ptr));

	case SHMAT:
		return compat_sys_shmat(first, second, third, version, compat_ptr(ptr));
	case SHMDT:
		return sys_shmdt(compat_ptr(ptr));
	case SHMGET:
		return sys_shmget(first, (unsigned)second, third);
	case SHMCTL:
		return compat_sys_shmctl(first, second, compat_ptr(ptr));

	default:
		return -ENOSYS;
	}
	return -EINVAL;
}
asmlinkage long
compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options,
		  struct compat_rusage *ru);

asmlinkage long
sys32_waitpid (int pid, unsigned int *stat_addr, int options)
{
	return compat_sys_wait4(pid, stat_addr, options, NULL);
}

static unsigned int
ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
{
	size_t copied;
	unsigned int ret;

	copied = access_process_vm(child, addr, val, sizeof(*val), 0);
	return (copied != sizeof(ret)) ? -EIO : 0;
}

static unsigned int
ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
{
	if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
		return -EIO;
	return 0;
}

/*
 *  The order in which registers are stored in the ptrace regs structure
 */
#define PT_EBX	0
#define PT_ECX	1
#define PT_EDX	2
#define PT_ESI	3
#define PT_EDI	4
#define PT_EBP	5
#define PT_EAX	6
#define PT_DS	7
#define PT_ES	8
#define PT_FS	9
#define PT_GS	10
#define PT_ORIG_EAX 11
#define PT_EIP	12
#define PT_CS	13
#define PT_EFL	14
#define PT_UESP	15
#define PT_SS	16
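
/*
 * getreg()/putreg() below take a byte offset into the i386 user area and
 * divide by sizeof(int) to index these slots.  For example, a 32-bit
 * debugger's PTRACE_PEEKUSR with addr == PT_EIP * 4 reads cr_iip, and
 * PT_EAX * 4 maps to IA-64 register r8 (the ia32 syscall return-value
 * register).
 */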
static unsigned int
getreg (struct task_struct *child, int regno)
{
	struct pt_regs *child_regs;

	child_regs = ia64_task_regs(child);
	switch (regno / sizeof(int)) {
	case PT_EBX: return child_regs->r11;
	case PT_ECX: return child_regs->r9;
	case PT_EDX: return child_regs->r10;
	case PT_ESI: return child_regs->r14;
	case PT_EDI: return child_regs->r15;
	case PT_EBP: return child_regs->r13;
	case PT_EAX: return child_regs->r8;
	case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
	case PT_EIP: return child_regs->cr_iip;
	case PT_UESP: return child_regs->r12;
	case PT_EFL: return child->thread.eflag;
	case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
		return __USER_DS;
	case PT_CS: return __USER_CS;
	default:
		printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
		break;
	}
	return 0;
}

static void
putreg (struct task_struct *child, int regno, unsigned int value)
{
	struct pt_regs *child_regs;

	child_regs = ia64_task_regs(child);
	switch (regno / sizeof(int)) {
	case PT_EBX: child_regs->r11 = value; break;
	case PT_ECX: child_regs->r9 = value; break;
	case PT_EDX: child_regs->r10 = value; break;
	case PT_ESI: child_regs->r14 = value; break;
	case PT_EDI: child_regs->r15 = value; break;
	case PT_EBP: child_regs->r13 = value; break;
	case PT_EAX: child_regs->r8 = value; break;
	case PT_ORIG_EAX: child_regs->r1 = value; break;
	case PT_EIP: child_regs->cr_iip = value; break;
	case PT_UESP: child_regs->r12 = value; break;
	case PT_EFL: child->thread.eflag = value; break;
	case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
		if (value != __USER_DS)
			printk(KERN_ERR
			       "ia32.putreg: attempt to set invalid segment register %d = %x\n",
			       regno, value);
		break;
	case PT_CS:
		if (value != __USER_CS)
			printk(KERN_ERR
			       "ia32.putreg: attempt to set invalid segment register %d = %x\n",
			       regno, value);
		break;
	default:
		printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
		break;
	}
}
  1340. static void
  1341. put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
  1342. struct switch_stack *swp, int tos)
  1343. {
  1344. struct _fpreg_ia32 *f;
  1345. char buf[32];
  1346. f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
  1347. if ((regno += tos) >= 8)
  1348. regno -= 8;
  1349. switch (regno) {
  1350. case 0:
  1351. ia64f2ia32f(f, &ptp->f8);
  1352. break;
  1353. case 1:
  1354. ia64f2ia32f(f, &ptp->f9);
  1355. break;
  1356. case 2:
  1357. ia64f2ia32f(f, &ptp->f10);
  1358. break;
  1359. case 3:
  1360. ia64f2ia32f(f, &ptp->f11);
  1361. break;
  1362. case 4:
  1363. case 5:
  1364. case 6:
  1365. case 7:
  1366. ia64f2ia32f(f, &swp->f12 + (regno - 4));
  1367. break;
  1368. }
  1369. copy_to_user(reg, f, sizeof(*reg));
  1370. }

static void
get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
           struct switch_stack *swp, int tos)
{
        if ((regno += tos) >= 8)
                regno -= 8;
        switch (regno) {
        case 0:
                copy_from_user(&ptp->f8, reg, sizeof(*reg));
                break;
        case 1:
                copy_from_user(&ptp->f9, reg, sizeof(*reg));
                break;
        case 2:
                copy_from_user(&ptp->f10, reg, sizeof(*reg));
                break;
        case 3:
                copy_from_user(&ptp->f11, reg, sizeof(*reg));
                break;
        case 4:
        case 5:
        case 6:
        case 7:
                copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
                break;
        }
}
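
/*
 * The ia32 FPU environment is reconstructed from the ia64 ar.fcr/ar.fsr
 * shadow values kept in thread_struct: the control word is the low 16
 * bits of fcr, the status word the low 16 bits of fsr, and the tag word
 * bits 16-31 of fsr.  The top-of-stack index is bits 11-13 of the
 * status word; e.g. fsr = 0x3800 gives tos = (0x3800 >> 11) & 7 = 7.
 */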
int
save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;

        if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
                return -EFAULT;

        __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
        __put_user(tsk->thread.fsr & 0xffff, &save->swd);
        __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
        __put_user(tsk->thread.fir, &save->fip);
        __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
        __put_user(tsk->thread.fdr, &save->foo);
        __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

        /*
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = ia64_task_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                put_fpreg(i, &save->st_space[i], ptp, swp, tos);
        return 0;
}

static int
restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;
        unsigned int fsrlo, fsrhi, num32;

        if (!access_ok(VERIFY_READ, save, sizeof(*save)))
                return -EFAULT;

        __get_user(num32, (unsigned int __user *)&save->cwd);
        tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
        __get_user(fsrlo, (unsigned int __user *)&save->swd);
        __get_user(fsrhi, (unsigned int __user *)&save->twd);
        num32 = (fsrhi << 16) | fsrlo;
        tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->fip);
        tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->foo);
        tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

        /*
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = ia64_task_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                get_fpreg(i, &save->st_space[i], ptp, swp, tos);
        return 0;
}
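
/*
 * The fxsr image additionally carries MXCSR and the eight XMM registers.
 * MXCSR has no single ia64 home: its control bits are mirrored in bits
 * 32 and up of ar.fcr and its sticky status flags in bits 32 and up of
 * ar.fsr, so it is reassembled below as
 * ((fcr>>32) & 0xff80) | ((fsr>>32) & 0x3f).  The XMM registers are
 * kept pairwise in the switch_stack: xmm<i> occupies the f16+2i/f17+2i
 * register pair, 64 bits in each.
 */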
int
save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;
        unsigned long mxcsr = 0;
        unsigned long num128[2];

        if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
                return -EFAULT;

        __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
        __put_user(tsk->thread.fsr & 0xffff, &save->swd);
        __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
        __put_user(tsk->thread.fir, &save->fip);
        __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
        __put_user(tsk->thread.fdr, &save->foo);
        __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

        /*
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = ia64_task_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

        mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
        __put_user(mxcsr & 0xffff, &save->mxcsr);
        for (i = 0; i < 8; i++) {
                memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
                memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
                copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
        }
        return 0;
}

static int
restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;
        unsigned int fsrlo, fsrhi, num32;
        int mxcsr;
        unsigned long num64;
        unsigned long num128[2];

        if (!access_ok(VERIFY_READ, save, sizeof(*save)))
                return -EFAULT;

        __get_user(num32, (unsigned int __user *)&save->cwd);
        tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
        __get_user(fsrlo, (unsigned int __user *)&save->swd);
        __get_user(fsrhi, (unsigned int __user *)&save->twd);
        num32 = (fsrhi << 16) | fsrlo;
        tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->fip);
        tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->foo);
        tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

        /*
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = ia64_task_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

        __get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
        num64 = mxcsr & 0xff10;
        tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32);
        num64 = mxcsr & 0x3f;
        tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32);
        for (i = 0; i < 8; i++) {
                copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
                memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
                memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
        }
        return 0;
}
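
/*
 * ia32 ptrace() front end.  PEEKUSR/POKEUSR addresses are byte offsets
 * into the ia32 USER register area (17 4-byte slots, PT_EBX..PT_SS),
 * which getreg()/putreg() translate to the ia64 state above.  Requests
 * whose semantics are layout-independent are passed straight through to
 * sys_ptrace().
 */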
asmlinkage long
sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
{
        struct task_struct *child;
        unsigned int value, tmp;
        long i, ret;

        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = sys_ptrace(request, pid, addr, data);
                goto out;
        }

        ret = -ESRCH;
        read_lock(&tasklist_lock);
        child = find_task_by_pid(pid);
        if (child)
                get_task_struct(child);
        read_unlock(&tasklist_lock);
        if (!child)
                goto out;
        ret = -EPERM;
        if (pid == 1)           /* no messing around with init! */
                goto out_tsk;

        if (request == PTRACE_ATTACH) {
                ret = sys_ptrace(request, pid, addr, data);
                goto out_tsk;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (ret < 0)
                goto out_tsk;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:   /* read word at location addr */
                ret = ia32_peek(child, addr, &value);
                if (ret == 0)
                        ret = put_user(value, (unsigned int __user *) compat_ptr(data));
                else
                        ret = -EIO;
                goto out_tsk;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:   /* write the word at location addr */
                ret = ia32_poke(child, addr, data);
                goto out_tsk;

        case PTRACE_PEEKUSR:    /* read word at addr in USER area */
                ret = -EIO;
                if ((addr & 3) || addr > 17*sizeof(int))
                        break;
                tmp = getreg(child, addr);
                if (!put_user(tmp, (unsigned int __user *) compat_ptr(data)))
                        ret = 0;
                break;

        case PTRACE_POKEUSR:    /* write word at addr in USER area */
                ret = -EIO;
                if ((addr & 3) || addr > 17*sizeof(int))
                        break;
                putreg(child, addr, data);
                ret = 0;
                break;

        case IA32_PTRACE_GETREGS:
                if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) {
                        ret = -EIO;
                        break;
                }
                for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
                        put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data));
                        data += sizeof(int);
                }
                ret = 0;
                break;

        case IA32_PTRACE_SETREGS:
                if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) {
                        ret = -EIO;
                        break;
                }
                for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
                        get_user(tmp, (unsigned int __user *) compat_ptr(data));
                        putreg(child, i, tmp);
                        data += sizeof(int);
                }
                ret = 0;
                break;

        case IA32_PTRACE_GETFPREGS:
                ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
                                        compat_ptr(data));
                break;

        case IA32_PTRACE_GETFPXREGS:
                ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
                                         compat_ptr(data));
                break;

        case IA32_PTRACE_SETFPREGS:
                ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
                                           compat_ptr(data));
                break;

        case IA32_PTRACE_SETFPXREGS:
                ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
                                            compat_ptr(data));
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data));
                break;

        case PTRACE_SYSCALL:    /* continue, stop after next syscall */
        case PTRACE_CONT:       /* restart after signal. */
        case PTRACE_KILL:
        case PTRACE_SINGLESTEP: /* execute child for one instruction */
        case PTRACE_DETACH:     /* detach a process */
                ret = sys_ptrace(request, pid, addr, data);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }
  out_tsk:
        put_task_struct(child);
  out:
        unlock_kernel();
        return ret;
}

typedef struct {
        unsigned int ss_sp;
        unsigned int ss_flags;
        unsigned int ss_size;
} ia32_stack_t;
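
/*
 * Note that the trailing pt_regs argument makes the task's register
 * frame visible here: pt.r12 is the ia32 user stack pointer (see the
 * PT_UESP mapping in getreg() above), which do_sigaltstack() uses to
 * decide whether we are currently executing on the alternate stack.
 */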
asmlinkage long
sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
                   long arg2, long arg3, long arg4, long arg5, long arg6,
                   long arg7, struct pt_regs pt)
{
        stack_t uss, uoss;
        ia32_stack_t buf32;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (uss32) {
                if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
                        return -EFAULT;
                uss.ss_sp = (void __user *) (long) buf32.ss_sp;
                uss.ss_flags = buf32.ss_flags;
                /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the
                   check and set it to the user requested value later */
                if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
                        ret = -ENOMEM;
                        goto out;
                }
                uss.ss_size = MINSIGSTKSZ;
        }
        set_fs(KERNEL_DS);
        ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
                             (stack_t __user *) &uoss, pt.r12);
        /* now apply the size the user actually asked for (see the
           MINSIGSTKSZ lie above); buf32 is valid only if a new stack
           was passed in */
        if (uss32)
                current->sas_ss_size = buf32.ss_size;
        set_fs(old_fs);
  out:
        if (ret < 0)
                return ret;
        if (uoss32) {
                buf32.ss_sp = (unsigned long) uoss.ss_sp;
                buf32.ss_flags = uoss.ss_flags;
                buf32.ss_size = uoss.ss_size;
                if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
                        return -EFAULT;
        }
        return ret;
}

asmlinkage int
sys32_pause (void)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        return -ERESTARTNOHAND;
}
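
/*
 * An ia32 caller may pass an address that is 4KB-aligned but not aligned
 * to the (larger) ia64 page size, so round the start down to an ia64
 * page boundary and grow the length to match.  For example (assuming a
 * hypothetical 16KB ia64 PAGE_SIZE), msync(0x20571000, 0x1000) becomes
 * sys_msync(0x20570000, 0x2000, flags).
 */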
asmlinkage int
sys32_msync (unsigned int start, unsigned int len, int flags)
{
        unsigned int addr;

        if (OFFSET4K(start))
                return -EINVAL;
        addr = PAGE_START(start);
        return sys_msync(addr, len + (start - addr), flags);
}

struct sysctl32 {
        unsigned int name;
        int nlen;
        unsigned int oldval;
        unsigned int oldlenp;
        unsigned int newval;
        unsigned int newlen;
        unsigned int __unused[4];
};

#ifdef CONFIG_SYSCTL
asmlinkage long
sys32_sysctl (struct sysctl32 __user *args)
{
        struct sysctl32 a32;
        mm_segment_t old_fs = get_fs();
        void __user *oldvalp, *newvalp;
        size_t oldlen;
        int __user *namep;
        long ret;

        if (copy_from_user(&a32, args, sizeof(a32)))
                return -EFAULT;

        /*
         * We need to pre-validate these addresses: address checking is
         * disabled (set_fs(KERNEL_DS)) around the do_sysctl() call so that
         * the kernel-resident "oldlen" can be passed in, and we can't run
         * the risk of the user specifying bad addresses here.  Since we're
         * dealing with 32-bit addresses, we KNOW that access_ok() will
         * always succeed, so this is an expensive NOP, but so what...
         */
        namep = (int __user *) compat_ptr(a32.name);
        oldvalp = compat_ptr(a32.oldval);
        newvalp = compat_ptr(a32.newval);

        if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
            || !access_ok(VERIFY_WRITE, namep, 0)
            || !access_ok(VERIFY_WRITE, oldvalp, 0)
            || !access_ok(VERIFY_WRITE, newvalp, 0))
                return -EFAULT;

        set_fs(KERNEL_DS);
        lock_kernel();
        ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen,
                        newvalp, (size_t) a32.newlen);
        unlock_kernel();
        set_fs(old_fs);

        if (oldvalp && put_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
                return -EFAULT;

        return ret;
}
#endif /* CONFIG_SYSCTL */
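
/*
 * Report an ia32-plausible machine type ("i686") instead of the native
 * one so that uname(2) doesn't confuse 32-bit applications.
 */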
asmlinkage long
sys32_newuname (struct new_utsname __user *name)
{
        int ret = sys_newuname(name);

        if (!ret)
                if (copy_to_user(name->machine, "i686\0\0\0", 8))
                        ret = -EFAULT;
        return ret;
}

asmlinkage long
sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid)
{
        uid_t a, b, c;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c);
        set_fs(old_fs);

        if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
                return -EFAULT;
        return ret;
}

asmlinkage long
sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
{
        gid_t a, b, c;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c);
        set_fs(old_fs);

        if (ret)
                return ret;

        return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
}

asmlinkage long
sys32_lseek (unsigned int fd, int offset, unsigned int whence)
{
        /* Sign-extension of "offset" is important here... */
        return sys_lseek(fd, offset, whence);
}
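
/*
 * The *16 group calls use the legacy ia32 ABI in which gids are 16-bit
 * quantities; these helpers convert to and from the kernel's native
 * 32-bit gid_t one element at a time.
 */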
static int
groups16_to_user(short __user *grouplist, struct group_info *group_info)
{
        int i;
        short group;

        for (i = 0; i < group_info->ngroups; i++) {
                group = (short)GROUP_AT(group_info, i);
                if (put_user(group, grouplist+i))
                        return -EFAULT;
        }
        return 0;
}

static int
groups16_from_user(struct group_info *group_info, short __user *grouplist)
{
        int i;
        short group;

        for (i = 0; i < group_info->ngroups; i++) {
                if (get_user(group, grouplist+i))
                        return -EFAULT;
                GROUP_AT(group_info, i) = (gid_t)group;
        }
        return 0;
}

asmlinkage long
sys32_getgroups16 (int gidsetsize, short __user *grouplist)
{
        int i;

        if (gidsetsize < 0)
                return -EINVAL;

        get_group_info(current->group_info);
        i = current->group_info->ngroups;
        if (gidsetsize) {
                if (i > gidsetsize) {
                        i = -EINVAL;
                        goto out;
                }
                if (groups16_to_user(grouplist, current->group_info)) {
                        i = -EFAULT;
                        goto out;
                }
        }
  out:
        put_group_info(current->group_info);
        return i;
}

asmlinkage long
sys32_setgroups16 (int gidsetsize, short __user *grouplist)
{
        struct group_info *group_info;
        int retval;

        if (!capable(CAP_SETGID))
                return -EPERM;
        if ((unsigned)gidsetsize > NGROUPS_MAX)
                return -EINVAL;

        group_info = groups_alloc(gidsetsize);
        if (!group_info)
                return -ENOMEM;
        retval = groups16_from_user(group_info, grouplist);
        if (retval) {
                put_group_info(group_info);
                return retval;
        }

        retval = set_current_groups(group_info);
        put_group_info(group_info);

        return retval;
}
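
/*
 * The ia32 syscall ABI passes 64-bit file offsets and lengths as two
 * 32-bit register arguments; here (and in sys32_pread/sys32_pwrite/
 * sys32_fadvise64_64 below) they are reassembled as
 * ((u64) hi << 32) | lo before calling the native 64-bit syscall.
 */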
asmlinkage long
sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
{
        return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo);
}

asmlinkage long
sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
{
        return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
}

static int
putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf)
{
        int err;
        u64 hdev;

        if (clear_user(ubuf, sizeof(*ubuf)))
                return -EFAULT;

        hdev = huge_encode_dev(kbuf->dev);
        err  = __put_user(hdev, (u32 __user*)&ubuf->st_dev);
        err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1);
        err |= __put_user(kbuf->ino, &ubuf->__st_ino);
        err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
        err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
        err |= __put_user(kbuf->mode, &ubuf->st_mode);
        err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
        err |= __put_user(kbuf->uid, &ubuf->st_uid);
        err |= __put_user(kbuf->gid, &ubuf->st_gid);
        hdev = huge_encode_dev(kbuf->rdev);
        err |= __put_user(hdev, (u32 __user*)&ubuf->st_rdev);   /* |=, not =: don't drop earlier errors */
        err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1);
        err |= __put_user(kbuf->size, &ubuf->st_size_lo);
        err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
        err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
        err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
        err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
        err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
        err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
        err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
        err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
        err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
        return err;
}

asmlinkage long
sys32_stat64 (char __user *filename, struct stat64 __user *statbuf)
{
        struct kstat s;
        long ret = vfs_stat(filename, &s);

        if (!ret)
                ret = putstat64(statbuf, &s);
        return ret;
}

asmlinkage long
sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf)
{
        struct kstat s;
        long ret = vfs_lstat(filename, &s);

        if (!ret)
                ret = putstat64(statbuf, &s);
        return ret;
}

asmlinkage long
sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf)
{
        struct kstat s;
        long ret = vfs_fstat(fd, &s);

        if (!ret)
                ret = putstat64(statbuf, &s);
        return ret;
}

struct sysinfo32 {
        s32 uptime;
        u32 loads[3];
        u32 totalram;
        u32 freeram;
        u32 sharedram;
        u32 bufferram;
        u32 totalswap;
        u32 freeswap;
        u16 procs;
        u16 pad;
        u32 totalhigh;
        u32 freehigh;
        u32 mem_unit;
        char _f[8];
};
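
/*
 * sysinfo memory values are expressed in multiples of mem_unit.  If a
 * value would overflow 32 bits, mem_unit is doubled while all counts
 * are halved, which preserves each product (count * mem_unit).  For
 * example, 16GB of RAM with mem_unit == 1 does not fit in a u32, but
 * after scaling mem_unit up to a (hypothetical) 16KB PAGE_SIZE the
 * count becomes 0x100000, which does.
 */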
asmlinkage long
sys32_sysinfo (struct sysinfo32 __user *info)
{
        struct sysinfo s;
        long ret, err;
        int bitcount = 0;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_sysinfo((struct sysinfo __user *) &s);
        set_fs(old_fs);
        /* Check to see if any memory value is too large for 32-bit and
         * scale down if needed.
         */
        if ((s.totalram >> 32) || (s.totalswap >> 32)) {
                while (s.mem_unit < PAGE_SIZE) {
                        s.mem_unit <<= 1;
                        bitcount++;
                }
                s.totalram >>= bitcount;
                s.freeram >>= bitcount;
                s.sharedram >>= bitcount;
                s.bufferram >>= bitcount;
                s.totalswap >>= bitcount;
                s.freeswap >>= bitcount;
                s.totalhigh >>= bitcount;
                s.freehigh >>= bitcount;
        }

        if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
                return -EFAULT;

        err  = __put_user(s.uptime, &info->uptime);
        err |= __put_user(s.loads[0], &info->loads[0]);
        err |= __put_user(s.loads[1], &info->loads[1]);
        err |= __put_user(s.loads[2], &info->loads[2]);
        err |= __put_user(s.totalram, &info->totalram);
        err |= __put_user(s.freeram, &info->freeram);
        err |= __put_user(s.sharedram, &info->sharedram);
        err |= __put_user(s.bufferram, &info->bufferram);
        err |= __put_user(s.totalswap, &info->totalswap);
        err |= __put_user(s.freeswap, &info->freeswap);
        err |= __put_user(s.procs, &info->procs);
        err |= __put_user(s.totalhigh, &info->totalhigh);
        err |= __put_user(s.freehigh, &info->freehigh);
        err |= __put_user(s.mem_unit, &info->mem_unit);
        if (err)
                return -EFAULT;
        return ret;
}

asmlinkage long
sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval)
{
        mm_segment_t old_fs = get_fs();
        struct timespec t;
        long ret;

        set_fs(KERNEL_DS);
        ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
        set_fs(old_fs);
        if (put_compat_timespec(&t, interval))
                return -EFAULT;
        return ret;
}

asmlinkage long
sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
        return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}

asmlinkage long
sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
        return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}

asmlinkage long
sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count)
{
        mm_segment_t old_fs = get_fs();
        long ret;
        off_t of;

        if (offset && get_user(of, offset))
                return -EFAULT;

        set_fs(KERNEL_DS);
        ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count);
        set_fs(old_fs);

        if (!ret && offset && put_user(of, offset))
                return -EFAULT;

        return ret;
}

asmlinkage long
sys32_personality (unsigned int personality)
{
        long ret;

        if (current->personality == PER_LINUX32 && personality == PER_LINUX)
                personality = PER_LINUX32;
        ret = sys_personality(personality);
        if (ret == PER_LINUX32)
                ret = PER_LINUX;
        return ret;
}

asmlinkage unsigned long
sys32_brk (unsigned int brk)
{
        unsigned long ret, obrk;
        struct mm_struct *mm = current->mm;

        obrk = mm->brk;
        ret = sys_brk(brk);
        if (ret < obrk)
                clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret);
        return ret;
}

/* Structure for ia32 emulation on ia64 */
struct epoll_event32
{
        u32 events;
        u32 data[2];
};
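
/*
 * On ia32 the 64-bit "data" member of struct epoll_event is only
 * 4-byte aligned, so the layout differs from the native ia64 structure;
 * the wrappers below therefore convert each event, splitting and
 * rebuilding "data" from the two u32 halves of epoll_event32.
 */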
asmlinkage long
sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event)
{
        mm_segment_t old_fs = get_fs();
        struct epoll_event event64;
        int error;
        u32 data_halfword;

        if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32)))
                return -EFAULT;

        __get_user(event64.events, &event->events);
        __get_user(data_halfword, &event->data[0]);
        event64.data = data_halfword;
        __get_user(data_halfword, &event->data[1]);
        event64.data |= (u64)data_halfword << 32;

        set_fs(KERNEL_DS);
        error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64);
        set_fs(old_fs);

        return error;
}

asmlinkage long
sys32_epoll_wait(int epfd, struct epoll_event32 __user *events, int maxevents,
                 int timeout)
{
        struct epoll_event *events64 = NULL;
        mm_segment_t old_fs = get_fs();
        int numevents, size;
        int evt_idx;
        int do_free_pages = 0;

        if (maxevents <= 0) {
                return -EINVAL;
        }

        /* Verify that the area passed by the user is writeable */
        if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32)))
                return -EFAULT;

        /*
         * Allocate space for the intermediate copy.  If the space needed
         * is large enough to cause kmalloc to fail, then try again with
         * __get_free_pages.
         */
        size = maxevents * sizeof(struct epoll_event);
        events64 = kmalloc(size, GFP_KERNEL);
        if (events64 == NULL) {
                events64 = (struct epoll_event *)
                        __get_free_pages(GFP_KERNEL, get_order(size));
                if (events64 == NULL)
                        return -ENOMEM;
                do_free_pages = 1;
        }

        /* Do the system call */
        set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem */
        numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64,
                                   maxevents, timeout);
        set_fs(old_fs);

        /* Don't modify userspace memory if we're returning an error */
        if (numevents > 0) {
                /* Translate the 64-bit structures back into the 32-bit
                   structures */
                for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
                        __put_user(events64[evt_idx].events,
                                   &events[evt_idx].events);
                        __put_user((u32)events64[evt_idx].data,
                                   &events[evt_idx].data[0]);
                        __put_user((u32)(events64[evt_idx].data >> 32),
                                   &events[evt_idx].data[1]);
                }
        }

        if (do_free_pages)
                free_pages((unsigned long) events64, get_order(size));
        else
                kfree(events64);
        return numevents;
}

/*
 * Get a yet unused TLS descriptor index.
 */
static int
get_free_idx (void)
{
        struct thread_struct *t = &current->thread;
        int idx;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (desc_empty(t->tls_array + idx))
                        return idx + GDT_ENTRY_TLS_MIN;
        return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int
sys32_set_thread_area (struct ia32_user_desc __user *u_info)
{
        struct thread_struct *t = &current->thread;
        struct ia32_user_desc info;
        struct desc_struct *desc;
        int cpu, idx;

        if (copy_from_user(&info, u_info, sizeof(info)))
                return -EFAULT;
        idx = info.entry_number;

        /*
         * index -1 means the kernel should try to find and allocate an empty descriptor:
         */
        if (idx == -1) {
                idx = get_free_idx();
                if (idx < 0)
                        return idx;
                if (put_user(idx, &u_info->entry_number))
                        return -EFAULT;
        }

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

        cpu = smp_processor_id();

        if (LDT_empty(&info)) {
                desc->a = 0;
                desc->b = 0;
        } else {
                desc->a = LDT_entry_a(&info);
                desc->b = LDT_entry_b(&info);
        }
        load_TLS(t, cpu);
        return 0;
}

/*
 * Get the current Thread-Local Storage area:
 */
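
/*
 * These macros pick apart the two 32-bit halves (a = low word, b = high
 * word) of an x86 segment descriptor, whose fields are scattered for
 * historical reasons: base bits 0-15 sit in a[31:16], base bits 16-23
 * in b[7:0], and base bits 24-31 in b[31:24]; limit bits 0-15 are in
 * a[15:0] and bits 16-19 in b[19:16].
 */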
#define GET_BASE(desc) ( \
        (((desc)->a >> 16) & 0x0000ffff) | \
        (((desc)->b << 16) & 0x00ff0000) | \
        ( (desc)->b        & 0xff000000)   )

#define GET_LIMIT(desc) ( \
        ((desc)->a & 0x0ffff) | \
        ((desc)->b & 0xf0000) )

#define GET_32BIT(desc)         (((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)      (((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)      (((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)   (((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)       (((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)       (((desc)->b >> 20) & 1)

asmlinkage int
sys32_get_thread_area (struct ia32_user_desc __user *u_info)
{
        struct ia32_user_desc info;
        struct desc_struct *desc;
        int idx;

        if (get_user(idx, &u_info->entry_number))
                return -EFAULT;
        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

        info.entry_number = idx;
        info.base_addr = GET_BASE(desc);
        info.limit = GET_LIMIT(desc);
        info.seg_32bit = GET_32BIT(desc);
        info.contents = GET_CONTENTS(desc);
        info.read_exec_only = !GET_WRITABLE(desc);
        info.limit_in_pages = GET_LIMIT_PAGES(desc);
        info.seg_not_present = !GET_PRESENT(desc);
        info.useable = GET_USEABLE(desc);

        if (copy_to_user(u_info, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}

asmlinkage long
sys32_timer_create(u32 clock, struct compat_sigevent __user *se32, timer_t __user *timer_id)
{
        struct sigevent se;
        mm_segment_t oldfs;
        timer_t t;
        long err;

        if (se32 == NULL)
                return sys_timer_create(clock, NULL, timer_id);

        if (get_compat_sigevent(&se, se32))
                return -EFAULT;

        if (!access_ok(VERIFY_WRITE, timer_id, sizeof(timer_t)))
                return -EFAULT;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_timer_create(clock, (struct sigevent __user *) &se, (timer_t __user *) &t);
        set_fs(oldfs);

        if (!err)
                err = __put_user(t, timer_id);

        return err;
}

long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
                        __u32 len_low, __u32 len_high, int advice)
{
        return sys_fadvise64_64(fd,
                                (((u64)offset_high)<<32) | offset_low,
                                (((u64)len_high)<<32) | len_low,
                                advice);
}

#ifdef NOTYET  /* UNTESTED FOR IA64 FROM HERE DOWN */

asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
{
        uid_t sruid, seuid;

        sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
        seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
        return sys_setreuid(sruid, seuid);
}

asmlinkage long
sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
                compat_uid_t suid)
{
        uid_t sruid, seuid, ssuid;

        sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
        seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
        ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
        return sys_setresuid(sruid, seuid, ssuid);
}

asmlinkage long
sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
{
        gid_t srgid, segid;

        srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
        segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
        return sys_setregid(srgid, segid);
}

asmlinkage long
sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
                compat_gid_t sgid)
{
        gid_t srgid, segid, ssgid;

        srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
        segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
        ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
        return sys_setresgid(srgid, segid, ssgid);
}

/* Handle adjtimex compatibility. */

struct timex32 {
        u32 modes;
        s32 offset, freq, maxerror, esterror;
        s32 status, constant, precision, tolerance;
        struct compat_timeval time;
        s32 tick;
        s32 ppsfreq, jitter, shift, stabil;
        s32 jitcnt, calcnt, errcnt, stbcnt;
        s32 :32; s32 :32; s32 :32; s32 :32;
        s32 :32; s32 :32; s32 :32; s32 :32;
        s32 :32; s32 :32; s32 :32; s32 :32;
};

extern int do_adjtimex(struct timex *);

asmlinkage long
sys32_adjtimex(struct timex32 *utp)
{
        struct timex txc;
        int ret;

        memset(&txc, 0, sizeof(struct timex));

        if (get_user(txc.modes, &utp->modes) ||
            __get_user(txc.offset, &utp->offset) ||
            __get_user(txc.freq, &utp->freq) ||
            __get_user(txc.maxerror, &utp->maxerror) ||
            __get_user(txc.esterror, &utp->esterror) ||
            __get_user(txc.status, &utp->status) ||
            __get_user(txc.constant, &utp->constant) ||
            __get_user(txc.precision, &utp->precision) ||
            __get_user(txc.tolerance, &utp->tolerance) ||
            __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
            __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
            __get_user(txc.tick, &utp->tick) ||
            __get_user(txc.ppsfreq, &utp->ppsfreq) ||
            __get_user(txc.jitter, &utp->jitter) ||
            __get_user(txc.shift, &utp->shift) ||
            __get_user(txc.stabil, &utp->stabil) ||
            __get_user(txc.jitcnt, &utp->jitcnt) ||
            __get_user(txc.calcnt, &utp->calcnt) ||
            __get_user(txc.errcnt, &utp->errcnt) ||
            __get_user(txc.stbcnt, &utp->stbcnt))
                return -EFAULT;

        ret = do_adjtimex(&txc);

        if (put_user(txc.modes, &utp->modes) ||
            __put_user(txc.offset, &utp->offset) ||
            __put_user(txc.freq, &utp->freq) ||
            __put_user(txc.maxerror, &utp->maxerror) ||
            __put_user(txc.esterror, &utp->esterror) ||
            __put_user(txc.status, &utp->status) ||
            __put_user(txc.constant, &utp->constant) ||
            __put_user(txc.precision, &utp->precision) ||
            __put_user(txc.tolerance, &utp->tolerance) ||
            __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
            __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
            __put_user(txc.tick, &utp->tick) ||
            __put_user(txc.ppsfreq, &utp->ppsfreq) ||
            __put_user(txc.jitter, &utp->jitter) ||
            __put_user(txc.shift, &utp->shift) ||
            __put_user(txc.stabil, &utp->stabil) ||
            __put_user(txc.jitcnt, &utp->jitcnt) ||
            __put_user(txc.calcnt, &utp->calcnt) ||
            __put_user(txc.errcnt, &utp->errcnt) ||
            __put_user(txc.stbcnt, &utp->stbcnt))
                ret = -EFAULT;

        return ret;
}
#endif /* NOTYET */