sys_ia32.c

  1. /*
  2. * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
  3. *
  4. * Copyright (C) 2000 VA Linux Co
  5. * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
  6. * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
  7. * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  8. * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
  9. * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
  10. * David Mosberger-Tang <davidm@hpl.hp.com>
  11. * Copyright (C) 2004 Gordon Jin <gordon.jin@intel.com>
  12. *
  13. * These routines maintain argument size conversion between 32bit and 64bit
  14. * environment.
  15. */
  16. #include <linux/kernel.h>
  17. #include <linux/syscalls.h>
  18. #include <linux/sysctl.h>
  19. #include <linux/sched.h>
  20. #include <linux/fs.h>
  21. #include <linux/file.h>
  22. #include <linux/signal.h>
  23. #include <linux/resource.h>
  24. #include <linux/times.h>
  25. #include <linux/utsname.h>
  26. #include <linux/smp.h>
  27. #include <linux/smp_lock.h>
  28. #include <linux/sem.h>
  29. #include <linux/msg.h>
  30. #include <linux/mm.h>
  31. #include <linux/shm.h>
  32. #include <linux/slab.h>
  33. #include <linux/uio.h>
  34. #include <linux/nfs_fs.h>
  35. #include <linux/quota.h>
  36. #include <linux/syscalls.h>
  37. #include <linux/sunrpc/svc.h>
  38. #include <linux/nfsd/nfsd.h>
  39. #include <linux/nfsd/cache.h>
  40. #include <linux/nfsd/xdr.h>
  41. #include <linux/nfsd/syscall.h>
  42. #include <linux/poll.h>
  43. #include <linux/eventpoll.h>
  44. #include <linux/personality.h>
  45. #include <linux/ptrace.h>
  46. #include <linux/stat.h>
  47. #include <linux/ipc.h>
  48. #include <linux/capability.h>
  49. #include <linux/compat.h>
  50. #include <linux/vfs.h>
  51. #include <linux/mman.h>
  52. #include <linux/mutex.h>
  53. #include <asm/intrinsics.h>
  54. #include <asm/types.h>
  55. #include <asm/uaccess.h>
  56. #include <asm/unistd.h>
  57. #include "ia32priv.h"
  58. #include <net/scm.h>
  59. #include <net/sock.h>
  60. #define DEBUG 0
  61. #if DEBUG
  62. # define DBG(fmt...) printk(KERN_DEBUG fmt)
  63. #else
  64. # define DBG(fmt...)
  65. #endif
  66. #define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
  67. #define OFFSET4K(a) ((a) & 0xfff)
  68. #define PAGE_START(addr) ((addr) & PAGE_MASK)
  69. #define MINSIGSTKSZ_IA32 2048
  70. #define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
  71. #define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))
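/*
 * Illustrative values (editor's sketch, not part of the original source),
 * assuming the common 16KB native IA-64 page size:
 *   ROUND_UP(0x1003, 4)  == 0x1004   - round up to a multiple of 4
 *   OFFSET4K(0x12345)    == 0x345    - offset within an IA-32 4KB page
 *   PAGE_START(0x12345)  == 0x10000  - start of the enclosing IA-64 page
 *   high2lowuid(100000)  == 65534    - clamp to the legacy 16-bit overflow UID
 */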
  72. /*
  73. * Anything that modifies or inspects ia32 user virtual memory must hold this mutex
  74. * while doing so.
  75. */
  76. /* XXX make per-mm: */
  77. static DEFINE_MUTEX(ia32_mmap_mutex);
  78. asmlinkage long
  79. sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
  80. struct pt_regs *regs)
  81. {
  82. long error;
  83. char *filename;
  84. unsigned long old_map_base, old_task_size, tssd;
  85. filename = getname(name);
  86. error = PTR_ERR(filename);
  87. if (IS_ERR(filename))
  88. return error;
  89. old_map_base = current->thread.map_base;
  90. old_task_size = current->thread.task_size;
  91. tssd = ia64_get_kr(IA64_KR_TSSD);
  92. /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
  93. current->thread.map_base = DEFAULT_MAP_BASE;
  94. current->thread.task_size = DEFAULT_TASK_SIZE;
  95. ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
  96. ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
  97. error = compat_do_execve(filename, argv, envp, regs);
  98. putname(filename);
  99. if (error < 0) {
  100. /* oops, execve failed, switch back to old values... */
  101. ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
  102. ia64_set_kr(IA64_KR_TSSD, tssd);
  103. current->thread.map_base = old_map_base;
  104. current->thread.task_size = old_task_size;
  105. }
  106. return error;
  107. }
  108. int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
  109. {
  110. int err;
  111. if ((u64) stat->size > MAX_NON_LFS ||
  112. !old_valid_dev(stat->dev) ||
  113. !old_valid_dev(stat->rdev))
  114. return -EOVERFLOW;
  115. if (clear_user(ubuf, sizeof(*ubuf)))
  116. return -EFAULT;
  117. err = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev);
  118. err |= __put_user(stat->ino, &ubuf->st_ino);
  119. err |= __put_user(stat->mode, &ubuf->st_mode);
  120. err |= __put_user(stat->nlink, &ubuf->st_nlink);
  121. err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid);
  122. err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
  123. err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev);
  124. err |= __put_user(stat->size, &ubuf->st_size);
  125. err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
  126. err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
  127. err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
  128. err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
  129. err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
  130. err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
  131. err |= __put_user(stat->blksize, &ubuf->st_blksize);
  132. err |= __put_user(stat->blocks, &ubuf->st_blocks);
  133. return err;
  134. }
  135. #if PAGE_SHIFT > IA32_PAGE_SHIFT
  136. static int
  137. get_page_prot (struct vm_area_struct *vma, unsigned long addr)
  138. {
  139. int prot = 0;
  140. if (!vma || vma->vm_start > addr)
  141. return 0;
  142. if (vma->vm_flags & VM_READ)
  143. prot |= PROT_READ;
  144. if (vma->vm_flags & VM_WRITE)
  145. prot |= PROT_WRITE;
  146. if (vma->vm_flags & VM_EXEC)
  147. prot |= PROT_EXEC;
  148. return prot;
  149. }
  150. /*
  151. * Map a subpage by creating an anonymous page that contains the union of the old page and
  152. * the subpage.
  153. */
  154. static unsigned long
  155. mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
  156. loff_t off)
  157. {
  158. void *page = NULL;
  159. struct inode *inode;
  160. unsigned long ret = 0;
  161. struct vm_area_struct *vma = find_vma(current->mm, start);
  162. int old_prot = get_page_prot(vma, start);
  163. DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
  164. file, start, end, prot, flags, off);
  165. /* Optimize the case where the old mmap and the new mmap are both anonymous */
  166. if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
  167. if (clear_user((void __user *) start, end - start)) {
  168. ret = -EFAULT;
  169. goto out;
  170. }
  171. goto skip_mmap;
  172. }
  173. page = (void *) get_zeroed_page(GFP_KERNEL);
  174. if (!page)
  175. return -ENOMEM;
  176. if (old_prot)
  177. copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE);
  178. down_write(&current->mm->mmap_sem);
  179. {
  180. ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
  181. flags | MAP_FIXED | MAP_ANONYMOUS, 0);
  182. }
  183. up_write(&current->mm->mmap_sem);
  184. if (IS_ERR((void *) ret))
  185. goto out;
  186. if (old_prot) {
  187. /* copy back the old page contents. */
  188. if (offset_in_page(start))
  189. copy_to_user((void __user *) PAGE_START(start), page,
  190. offset_in_page(start));
  191. if (offset_in_page(end))
  192. copy_to_user((void __user *) end, page + offset_in_page(end),
  193. PAGE_SIZE - offset_in_page(end));
  194. }
  195. if (!(flags & MAP_ANONYMOUS)) {
  196. /* read the file contents */
  197. inode = file->f_dentry->d_inode;
  198. if (!inode->i_fop || !file->f_op->read
  199. || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
  200. {
  201. ret = -EINVAL;
  202. goto out;
  203. }
  204. }
  205. skip_mmap:
  206. if (!(prot & PROT_WRITE))
  207. ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
  208. out:
  209. if (page)
  210. free_page((unsigned long) page);
  211. return ret;
  212. }
  213. /* SLAB cache for partial_page structures */
  214. kmem_cache_t *partial_page_cachep;
  215. /*
  216. * Initialize a partial_page_list.
  217. * Returns NULL if the kmalloc fails.
  218. */
  219. struct partial_page_list*
  220. ia32_init_pp_list(void)
  221. {
  222. struct partial_page_list *p;
  223. if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
  224. return p;
  225. p->pp_head = NULL;
  226. p->ppl_rb = RB_ROOT;
  227. p->pp_hint = NULL;
  228. atomic_set(&p->pp_count, 1);
  229. return p;
  230. }
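/*
 * Background sketch (added for clarity, derived from the code below): when the
 * native IA-64 page size exceeds the IA-32 4KB page size, a single IA-64 page
 * may be only partially populated by IA-32 mappings. Each struct partial_page
 * records one such IA-64 page (pp->base) plus a bitmap with one bit per 4KB
 * subpage; the list/rb-tree rooted at current->thread.ppl (shared among
 * CLONE_VM threads, see ia32_copy_partial_page_list below) tracks them so the
 * mmap/munmap/mprotect emulation knows which subpages are live.
 */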
  231. /*
  232. * Search for the partial page with @start in partial page list @ppl.
  233. * If the partial page is found, return it.
  234. * Otherwise, return NULL and provide @pprev, @rb_link and @rb_parent to
  235. * be used by a later __ia32_insert_pp().
  236. */
  237. static struct partial_page *
  238. __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
  239. struct partial_page **pprev, struct rb_node ***rb_link,
  240. struct rb_node **rb_parent)
  241. {
  242. struct partial_page *pp;
  243. struct rb_node **__rb_link, *__rb_parent, *rb_prev;
  244. pp = ppl->pp_hint;
  245. if (pp && pp->base == start)
  246. return pp;
  247. __rb_link = &ppl->ppl_rb.rb_node;
  248. rb_prev = __rb_parent = NULL;
  249. while (*__rb_link) {
  250. __rb_parent = *__rb_link;
  251. pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
  252. if (pp->base == start) {
  253. ppl->pp_hint = pp;
  254. return pp;
  255. } else if (pp->base < start) {
  256. rb_prev = __rb_parent;
  257. __rb_link = &__rb_parent->rb_right;
  258. } else {
  259. __rb_link = &__rb_parent->rb_left;
  260. }
  261. }
  262. *rb_link = __rb_link;
  263. *rb_parent = __rb_parent;
  264. *pprev = NULL;
  265. if (rb_prev)
  266. *pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
  267. return NULL;
  268. }
  269. /*
  270. * insert @pp into @ppl.
  271. */
  272. static void
  273. __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
  274. struct partial_page *prev, struct rb_node **rb_link,
  275. struct rb_node *rb_parent)
  276. {
  277. /* link list */
  278. if (prev) {
  279. pp->next = prev->next;
  280. prev->next = pp;
  281. } else {
  282. ppl->pp_head = pp;
  283. if (rb_parent)
  284. pp->next = rb_entry(rb_parent,
  285. struct partial_page, pp_rb);
  286. else
  287. pp->next = NULL;
  288. }
  289. /* link rb */
  290. rb_link_node(&pp->pp_rb, rb_parent, rb_link);
  291. rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);
  292. ppl->pp_hint = pp;
  293. }
  294. /*
  295. * delete @pp from partial page list @ppl.
  296. */
  297. static void
  298. __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
  299. struct partial_page *prev)
  300. {
  301. if (prev) {
  302. prev->next = pp->next;
  303. if (ppl->pp_hint == pp)
  304. ppl->pp_hint = prev;
  305. } else {
  306. ppl->pp_head = pp->next;
  307. if (ppl->pp_hint == pp)
  308. ppl->pp_hint = pp->next;
  309. }
  310. rb_erase(&pp->pp_rb, &ppl->ppl_rb);
  311. kmem_cache_free(partial_page_cachep, pp);
  312. }
  313. static struct partial_page *
  314. __pp_prev(struct partial_page *pp)
  315. {
  316. struct rb_node *prev = rb_prev(&pp->pp_rb);
  317. if (prev)
  318. return rb_entry(prev, struct partial_page, pp_rb);
  319. else
  320. return NULL;
  321. }
  322. /*
  323. * Delete partial pages with address between @start and @end.
  324. * @start and @end are page aligned.
  325. */
  326. static void
  327. __ia32_delete_pp_range(unsigned int start, unsigned int end)
  328. {
  329. struct partial_page *pp, *prev;
  330. struct rb_node **rb_link, *rb_parent;
  331. if (start >= end)
  332. return;
  333. pp = __ia32_find_pp(current->thread.ppl, start, &prev,
  334. &rb_link, &rb_parent);
  335. if (pp)
  336. prev = __pp_prev(pp);
  337. else {
  338. if (prev)
  339. pp = prev->next;
  340. else
  341. pp = current->thread.ppl->pp_head;
  342. }
  343. while (pp && pp->base < end) {
  344. struct partial_page *tmp = pp->next;
  345. __ia32_delete_pp(current->thread.ppl, pp, prev);
  346. pp = tmp;
  347. }
  348. }
  349. /*
  350. * Set the range between @start and @end in bitmap.
  351. * @start and @end should be IA32 page aligned and in the same IA64 page.
  352. */
  353. static int
  354. __ia32_set_pp(unsigned int start, unsigned int end, int flags)
  355. {
  356. struct partial_page *pp, *prev;
  357. struct rb_node ** rb_link, *rb_parent;
  358. unsigned int pstart, start_bit, end_bit, i;
  359. pstart = PAGE_START(start);
  360. start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
  361. end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
  362. if (end_bit == 0)
  363. end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
  364. pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
  365. &rb_link, &rb_parent);
  366. if (pp) {
  367. for (i = start_bit; i < end_bit; i++)
  368. set_bit(i, &pp->bitmap);
  369. /*
  370. * Check: if this partial page has been set to a full page,
  371. * then delete it.
  372. */
  373. if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
  374. PAGE_SIZE/IA32_PAGE_SIZE) {
  375. __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
  376. }
  377. return 0;
  378. }
  379. /*
  380. * MAP_FIXED may lead to overlapping mmap.
  381. * In this case, the requested mmap area may already be mmapped as a full
  382. * page. So check vma before adding a new partial page.
  383. */
  384. if (flags & MAP_FIXED) {
  385. struct vm_area_struct *vma = find_vma(current->mm, pstart);
  386. if (vma && vma->vm_start <= pstart)
  387. return 0;
  388. }
  389. /* allocate a new partial_page */
  390. pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
  391. if (!pp)
  392. return -ENOMEM;
  393. pp->base = pstart;
  394. pp->bitmap = 0;
  395. for (i=start_bit; i<end_bit; i++)
  396. set_bit(i, &(pp->bitmap));
  397. pp->next = NULL;
  398. __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
  399. return 0;
  400. }
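/*
 * Worked example (editor's sketch, assuming 16KB IA-64 pages, i.e. four 4KB
 * subpage bits per partial page): __ia32_set_pp(0x1000, 0x3000, 0) computes
 * start_bit == 1 and end_bit == 3, so it sets bits 1 and 2, marking the
 * second and third IA-32 subpages of that IA-64 page as mapped.
 */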
  401. /*
  402. * @start and @end should be IA32 page aligned, but don't need to be in the
  403. * same IA64 page. Split @start and @end to make sure they're in the same IA64
  404. * page, then call __ia32_set_pp().
  405. */
  406. static void
  407. ia32_set_pp(unsigned int start, unsigned int end, int flags)
  408. {
  409. down_write(&current->mm->mmap_sem);
  410. if (flags & MAP_FIXED) {
  411. /*
  412. * MAP_FIXED may lead to overlapping mmap. When this happens,
  413. * a series of complete IA64 pages results in deletion of
  414. * old partial pages in that range.
  415. */
  416. __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
  417. }
  418. if (end < PAGE_ALIGN(start)) {
  419. __ia32_set_pp(start, end, flags);
  420. } else {
  421. if (offset_in_page(start))
  422. __ia32_set_pp(start, PAGE_ALIGN(start), flags);
  423. if (offset_in_page(end))
  424. __ia32_set_pp(PAGE_START(end), end, flags);
  425. }
  426. up_write(&current->mm->mmap_sem);
  427. }
  428. /*
  429. * Unset the range between @start and @end in bitmap.
  430. * @start and @end should be IA32 page aligned and in the same IA64 page.
  431. * After doing that, if the bitmap is 0, then free the page and return 1;
  432. * else return 0.
  433. * If the partial page is not found in the list, then:
  434. * if the vma exists, the full page is converted to a partial page;
  435. * else return -ENOMEM.
  436. */
  437. static int
  438. __ia32_unset_pp(unsigned int start, unsigned int end)
  439. {
  440. struct partial_page *pp, *prev;
  441. struct rb_node ** rb_link, *rb_parent;
  442. unsigned int pstart, start_bit, end_bit, i;
  443. struct vm_area_struct *vma;
  444. pstart = PAGE_START(start);
  445. start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
  446. end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
  447. if (end_bit == 0)
  448. end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
  449. pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
  450. &rb_link, &rb_parent);
  451. if (pp) {
  452. for (i = start_bit; i < end_bit; i++)
  453. clear_bit(i, &pp->bitmap);
  454. if (pp->bitmap == 0) {
  455. __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
  456. return 1;
  457. }
  458. return 0;
  459. }
  460. vma = find_vma(current->mm, pstart);
  461. if (!vma || vma->vm_start > pstart) {
  462. return -ENOMEM;
  463. }
  464. /* allocate a new partial_page */
  465. pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
  466. if (!pp)
  467. return -ENOMEM;
  468. pp->base = pstart;
  469. pp->bitmap = 0;
  470. for (i = 0; i < start_bit; i++)
  471. set_bit(i, &(pp->bitmap));
  472. for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++)
  473. set_bit(i, &(pp->bitmap));
  474. pp->next = NULL;
  475. __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
  476. return 0;
  477. }
  478. /*
  479. * Delete pp between PAGE_ALIGN(start) and PAGE_START(end) by calling
  480. * __ia32_delete_pp_range(). Unset possible partial pages by calling
  481. * __ia32_unset_pp().
  482. * For the return value, see __ia32_unset_pp().
  483. */
  484. static int
  485. ia32_unset_pp(unsigned int *startp, unsigned int *endp)
  486. {
  487. unsigned int start = *startp, end = *endp;
  488. int ret = 0;
  489. down_write(&current->mm->mmap_sem);
  490. __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
  491. if (end < PAGE_ALIGN(start)) {
  492. ret = __ia32_unset_pp(start, end);
  493. if (ret == 1) {
  494. *startp = PAGE_START(start);
  495. *endp = PAGE_ALIGN(end);
  496. }
  497. if (ret == 0) {
  498. /* to shortcut sys_munmap() in sys32_munmap() */
  499. *startp = PAGE_START(start);
  500. *endp = PAGE_START(end);
  501. }
  502. } else {
  503. if (offset_in_page(start)) {
  504. ret = __ia32_unset_pp(start, PAGE_ALIGN(start));
  505. if (ret == 1)
  506. *startp = PAGE_START(start);
  507. if (ret == 0)
  508. *startp = PAGE_ALIGN(start);
  509. if (ret < 0)
  510. goto out;
  511. }
  512. if (offset_in_page(end)) {
  513. ret = __ia32_unset_pp(PAGE_START(end), end);
  514. if (ret == 1)
  515. *endp = PAGE_ALIGN(end);
  516. if (ret == 0)
  517. *endp = PAGE_START(end);
  518. }
  519. }
  520. out:
  521. up_write(&current->mm->mmap_sem);
  522. return ret;
  523. }
  524. /*
  525. * Compare the range between @start and @end with bitmap in partial page.
  526. * @start and @end should be IA32 page aligned and in the same IA64 page.
  527. */
  528. static int
  529. __ia32_compare_pp(unsigned int start, unsigned int end)
  530. {
  531. struct partial_page *pp, *prev;
  532. struct rb_node ** rb_link, *rb_parent;
  533. unsigned int pstart, start_bit, end_bit, size;
  534. unsigned int first_bit, next_zero_bit; /* the first range in bitmap */
  535. pstart = PAGE_START(start);
  536. pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
  537. &rb_link, &rb_parent);
  538. if (!pp)
  539. return 1;
  540. start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
  541. end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
  542. size = sizeof(pp->bitmap) * 8;
  543. first_bit = find_first_bit(&pp->bitmap, size);
  544. next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit);
  545. if ((start_bit < first_bit) || (end_bit > next_zero_bit)) {
  546. /* exceeds the first range in bitmap */
  547. return -ENOMEM;
  548. } else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) {
  549. first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit);
  550. if ((next_zero_bit < first_bit) && (first_bit < size))
  551. return 1; /* has next range */
  552. else
  553. return 0; /* no next range */
  554. } else
  555. return 1;
  556. }
  557. /*
  558. * @start and @end should be IA32 page aligned, but don't need to be in the
  559. * same IA64 page. Split @start and @end to make sure they're in the same IA64
  560. * page, then call __ia32_compare_pp().
  561. *
  562. * Take this as example: the range is the 1st and 2nd 4K page.
  563. * Return 0 if they fit bitmap exactly, i.e. bitmap = 00000011;
  564. * Return 1 if the range doesn't cover whole bitmap, e.g. bitmap = 00001111;
  565. * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or
  566. * bitmap = 00000101.
  567. */
  568. static int
  569. ia32_compare_pp(unsigned int *startp, unsigned int *endp)
  570. {
  571. unsigned int start = *startp, end = *endp;
  572. int retval = 0;
  573. down_write(&current->mm->mmap_sem);
  574. if (end < PAGE_ALIGN(start)) {
  575. retval = __ia32_compare_pp(start, end);
  576. if (retval == 0) {
  577. *startp = PAGE_START(start);
  578. *endp = PAGE_ALIGN(end);
  579. }
  580. } else {
  581. if (offset_in_page(start)) {
  582. retval = __ia32_compare_pp(start,
  583. PAGE_ALIGN(start));
  584. if (retval == 0)
  585. *startp = PAGE_START(start);
  586. if (retval < 0)
  587. goto out;
  588. }
  589. if (offset_in_page(end)) {
  590. retval = __ia32_compare_pp(PAGE_START(end), end);
  591. if (retval == 0)
  592. *endp = PAGE_ALIGN(end);
  593. }
  594. }
  595. out:
  596. up_write(&current->mm->mmap_sem);
  597. return retval;
  598. }
  599. static void
  600. __ia32_drop_pp_list(struct partial_page_list *ppl)
  601. {
  602. struct partial_page *pp = ppl->pp_head;
  603. while (pp) {
  604. struct partial_page *next = pp->next;
  605. kmem_cache_free(partial_page_cachep, pp);
  606. pp = next;
  607. }
  608. kfree(ppl);
  609. }
  610. void
  611. ia32_drop_partial_page_list(struct task_struct *task)
  612. {
  613. struct partial_page_list* ppl = task->thread.ppl;
  614. if (ppl && atomic_dec_and_test(&ppl->pp_count))
  615. __ia32_drop_pp_list(ppl);
  616. }
  617. /*
  618. * Copy current->thread.ppl to ppl (already initialized).
  619. */
  620. static int
  621. __ia32_copy_pp_list(struct partial_page_list *ppl)
  622. {
  623. struct partial_page *pp, *tmp, *prev;
  624. struct rb_node **rb_link, *rb_parent;
  625. ppl->pp_head = NULL;
  626. ppl->pp_hint = NULL;
  627. ppl->ppl_rb = RB_ROOT;
  628. rb_link = &ppl->ppl_rb.rb_node;
  629. rb_parent = NULL;
  630. prev = NULL;
  631. for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
  632. tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
  633. if (!tmp)
  634. return -ENOMEM;
  635. *tmp = *pp;
  636. __ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent);
  637. prev = tmp;
  638. rb_link = &tmp->pp_rb.rb_right;
  639. rb_parent = &tmp->pp_rb;
  640. }
  641. return 0;
  642. }
  643. int
  644. ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
  645. {
  646. int retval = 0;
  647. if (clone_flags & CLONE_VM) {
  648. atomic_inc(&current->thread.ppl->pp_count);
  649. p->thread.ppl = current->thread.ppl;
  650. } else {
  651. p->thread.ppl = ia32_init_pp_list();
  652. if (!p->thread.ppl)
  653. return -ENOMEM;
  654. down_write(&current->mm->mmap_sem);
  655. {
  656. retval = __ia32_copy_pp_list(p->thread.ppl);
  657. }
  658. up_write(&current->mm->mmap_sem);
  659. }
  660. return retval;
  661. }
  662. static unsigned long
  663. emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
  664. loff_t off)
  665. {
  666. unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
  667. struct inode *inode;
  668. loff_t poff;
  669. end = start + len;
  670. pstart = PAGE_START(start);
  671. pend = PAGE_ALIGN(end);
  672. if (flags & MAP_FIXED) {
  673. ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
  674. if (start > pstart) {
  675. if (flags & MAP_SHARED)
  676. printk(KERN_INFO
  677. "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
  678. current->comm, current->pid, start);
  679. ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
  680. off);
  681. if (IS_ERR((void *) ret))
  682. return ret;
  683. pstart += PAGE_SIZE;
  684. if (pstart >= pend)
  685. goto out; /* done */
  686. }
  687. if (end < pend) {
  688. if (flags & MAP_SHARED)
  689. printk(KERN_INFO
  690. "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
  691. current->comm, current->pid, end);
  692. ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
  693. (off + len) - offset_in_page(end));
  694. if (IS_ERR((void *) ret))
  695. return ret;
  696. pend -= PAGE_SIZE;
  697. if (pstart >= pend)
  698. goto out; /* done */
  699. }
  700. } else {
  701. /*
  702. * If a start address was specified, use it if the entire rounded out area
  703. * is available.
  704. */
  705. if (start && !pstart)
  706. fudge = 1; /* handle case of mapping to range (0,PAGE_SIZE) */
  707. tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
  708. if (tmp != pstart) {
  709. pstart = tmp;
  710. start = pstart + offset_in_page(off); /* make start congruent with off */
  711. end = start + len;
  712. pend = PAGE_ALIGN(end);
  713. }
  714. }
  715. poff = off + (pstart - start); /* note: (pstart - start) may be negative */
  716. is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);
  717. if ((flags & MAP_SHARED) && !is_congruent)
  718. printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
  719. "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off);
  720. DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
  721. is_congruent ? "congruent" : "not congruent", poff);
  722. down_write(&current->mm->mmap_sem);
  723. {
  724. if (!(flags & MAP_ANONYMOUS) && is_congruent)
  725. ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
  726. else
  727. ret = do_mmap(NULL, pstart, pend - pstart,
  728. prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
  729. flags | MAP_FIXED | MAP_ANONYMOUS, 0);
  730. }
  731. up_write(&current->mm->mmap_sem);
  732. if (IS_ERR((void *) ret))
  733. return ret;
  734. if (!is_congruent) {
  735. /* read the file contents */
  736. inode = file->f_dentry->d_inode;
  737. if (!inode->i_fop || !file->f_op->read
  738. || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
  739. < 0))
  740. {
  741. sys_munmap(pstart, pend - pstart);
  742. return -EINVAL;
  743. }
  744. if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
  745. return -EINVAL;
  746. }
  747. if (!(flags & MAP_FIXED))
  748. ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
  749. out:
  750. return start;
  751. }
  752. #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
  753. static inline unsigned int
  754. get_prot32 (unsigned int prot)
  755. {
  756. if (prot & PROT_WRITE)
  757. /* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
  758. prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
  759. else if (prot & (PROT_READ | PROT_EXEC))
  760. /* on x86, there is no distinction between PROT_READ and PROT_EXEC */
  761. prot |= (PROT_READ | PROT_EXEC);
  762. return prot;
  763. }
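/*
 * Effective mapping (illustration derived from the rules above):
 *   get_prot32(PROT_NONE)   == PROT_NONE
 *   get_prot32(PROT_READ)   == PROT_READ | PROT_EXEC
 *   get_prot32(PROT_EXEC)   == PROT_READ | PROT_EXEC
 *   get_prot32(PROT_WRITE)  == PROT_READ | PROT_WRITE | PROT_EXEC
 */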
  764. unsigned long
  765. ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
  766. loff_t offset)
  767. {
  768. DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
  769. file, addr, len, prot, flags, offset);
  770. if (file && (!file->f_op || !file->f_op->mmap))
  771. return -ENODEV;
  772. len = IA32_PAGE_ALIGN(len);
  773. if (len == 0)
  774. return addr;
  775. if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
  776. {
  777. if (flags & MAP_FIXED)
  778. return -ENOMEM;
  779. else
  780. return -EINVAL;
  781. }
  782. if (OFFSET4K(offset))
  783. return -EINVAL;
  784. prot = get_prot32(prot);
  785. #if PAGE_SHIFT > IA32_PAGE_SHIFT
  786. mutex_lock(&ia32_mmap_mutex);
  787. {
  788. addr = emulate_mmap(file, addr, len, prot, flags, offset);
  789. }
  790. mutex_unlock(&ia32_mmap_mutex);
  791. #else
  792. down_write(&current->mm->mmap_sem);
  793. {
  794. addr = do_mmap(file, addr, len, prot, flags, offset);
  795. }
  796. up_write(&current->mm->mmap_sem);
  797. #endif
  798. DBG("ia32_do_mmap: returning 0x%lx\n", addr);
  799. return addr;
  800. }
  801. /*
  802. * Linux/i386 didn't use to be able to handle more than 4 system call parameters, so these
  803. * system calls used a memory block for parameter passing..
  804. */
  805. struct mmap_arg_struct {
  806. unsigned int addr;
  807. unsigned int len;
  808. unsigned int prot;
  809. unsigned int flags;
  810. unsigned int fd;
  811. unsigned int offset;
  812. };
  813. asmlinkage long
  814. sys32_mmap (struct mmap_arg_struct __user *arg)
  815. {
  816. struct mmap_arg_struct a;
  817. struct file *file = NULL;
  818. unsigned long addr;
  819. int flags;
  820. if (copy_from_user(&a, arg, sizeof(a)))
  821. return -EFAULT;
  822. if (OFFSET4K(a.offset))
  823. return -EINVAL;
  824. flags = a.flags;
  825. flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
  826. if (!(flags & MAP_ANONYMOUS)) {
  827. file = fget(a.fd);
  828. if (!file)
  829. return -EBADF;
  830. }
  831. addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);
  832. if (file)
  833. fput(file);
  834. return addr;
  835. }
  836. asmlinkage long
  837. sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
  838. unsigned int fd, unsigned int pgoff)
  839. {
  840. struct file *file = NULL;
  841. unsigned long retval;
  842. flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
  843. if (!(flags & MAP_ANONYMOUS)) {
  844. file = fget(fd);
  845. if (!file)
  846. return -EBADF;
  847. }
  848. retval = ia32_do_mmap(file, addr, len, prot, flags,
  849. (unsigned long) pgoff << IA32_PAGE_SHIFT);
  850. if (file)
  851. fput(file);
  852. return retval;
  853. }
  854. asmlinkage long
  855. sys32_munmap (unsigned int start, unsigned int len)
  856. {
  857. unsigned int end = start + len;
  858. long ret;
  859. #if PAGE_SHIFT <= IA32_PAGE_SHIFT
  860. ret = sys_munmap(start, end - start);
  861. #else
  862. if (OFFSET4K(start))
  863. return -EINVAL;
  864. end = IA32_PAGE_ALIGN(end);
  865. if (start >= end)
  866. return -EINVAL;
  867. ret = ia32_unset_pp(&start, &end);
  868. if (ret < 0)
  869. return ret;
  870. if (start >= end)
  871. return 0;
  872. mutex_lock(&ia32_mmap_mutex);
  873. ret = sys_munmap(start, end - start);
  874. mutex_unlock(&ia32_mmap_mutex);
  875. #endif
  876. return ret;
  877. }
  878. #if PAGE_SHIFT > IA32_PAGE_SHIFT
  879. /*
  880. * When mprotect()ing a partial page, we set the permission to the union of the old
  881. * settings and the new settings. In other words, it's only possible to make access to a
  882. * partial page less restrictive.
  883. */
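/*
 * Example (editor's illustration): if a partial page was previously mapped
 * PROT_READ and user space now asks for PROT_WRITE on a 4KB piece of it, the
 * request arrives here already widened to R|W|X by get_prot32(), and the final
 * protection is the union of that with the old PROT_READ. Rights on a partial
 * page can therefore only grow, never shrink.
 */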
  884. static long
  885. mprotect_subpage (unsigned long address, int new_prot)
  886. {
  887. int old_prot;
  888. struct vm_area_struct *vma;
  889. if (new_prot == PROT_NONE)
  890. return 0; /* optimize case where nothing changes... */
  891. vma = find_vma(current->mm, address);
  892. old_prot = get_page_prot(vma, address);
  893. return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
  894. }
  895. #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
  896. asmlinkage long
  897. sys32_mprotect (unsigned int start, unsigned int len, int prot)
  898. {
  899. unsigned int end = start + len;
  900. #if PAGE_SHIFT > IA32_PAGE_SHIFT
  901. long retval = 0;
  902. #endif
  903. prot = get_prot32(prot);
  904. #if PAGE_SHIFT <= IA32_PAGE_SHIFT
  905. return sys_mprotect(start, end - start, prot);
  906. #else
  907. if (OFFSET4K(start))
  908. return -EINVAL;
  909. end = IA32_PAGE_ALIGN(end);
  910. if (end < start)
  911. return -EINVAL;
  912. retval = ia32_compare_pp(&start, &end);
  913. if (retval < 0)
  914. return retval;
  915. mutex_lock(&ia32_mmap_mutex);
  916. {
  917. if (offset_in_page(start)) {
  918. /* start address is 4KB aligned but not page aligned. */
  919. retval = mprotect_subpage(PAGE_START(start), prot);
  920. if (retval < 0)
  921. goto out;
  922. start = PAGE_ALIGN(start);
  923. if (start >= end)
  924. goto out; /* retval is already zero... */
  925. }
  926. if (offset_in_page(end)) {
  927. /* end address is 4KB aligned but not page aligned. */
  928. retval = mprotect_subpage(PAGE_START(end), prot);
  929. if (retval < 0)
  930. goto out;
  931. end = PAGE_START(end);
  932. }
  933. retval = sys_mprotect(start, end - start, prot);
  934. }
  935. out:
  936. mutex_unlock(&ia32_mmap_mutex);
  937. return retval;
  938. #endif
  939. }
  940. asmlinkage long
  941. sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
  942. unsigned int flags, unsigned int new_addr)
  943. {
  944. long ret;
  945. #if PAGE_SHIFT <= IA32_PAGE_SHIFT
  946. ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
  947. #else
  948. unsigned int old_end, new_end;
  949. if (OFFSET4K(addr))
  950. return -EINVAL;
  951. old_len = IA32_PAGE_ALIGN(old_len);
  952. new_len = IA32_PAGE_ALIGN(new_len);
  953. old_end = addr + old_len;
  954. new_end = addr + new_len;
  955. if (!new_len)
  956. return -EINVAL;
  957. if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr)))
  958. return -EINVAL;
  959. if (old_len >= new_len) {
  960. ret = sys32_munmap(addr + new_len, old_len - new_len);
  961. if (ret && old_len != new_len)
  962. return ret;
  963. ret = addr;
  964. if (!(flags & MREMAP_FIXED) || (new_addr == addr))
  965. return ret;
  966. old_len = new_len;
  967. }
  968. addr = PAGE_START(addr);
  969. old_len = PAGE_ALIGN(old_end) - addr;
  970. new_len = PAGE_ALIGN(new_end) - addr;
  971. mutex_lock(&ia32_mmap_mutex);
  972. ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
  973. mutex_unlock(&ia32_mmap_mutex);
  974. if ((ret >= 0) && (old_len < new_len)) {
  975. /* mremap expanded successfully */
  976. ia32_set_pp(old_end, new_end, flags);
  977. }
  978. #endif
  979. return ret;
  980. }
  981. asmlinkage long
  982. sys32_pipe (int __user *fd)
  983. {
  984. int retval;
  985. int fds[2];
  986. retval = do_pipe(fds);
  987. if (retval)
  988. goto out;
  989. if (copy_to_user(fd, fds, sizeof(fds)))
  990. retval = -EFAULT;
  991. out:
  992. return retval;
  993. }
  994. static inline long
  995. get_tv32 (struct timeval *o, struct compat_timeval __user *i)
  996. {
  997. return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
  998. (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec)));
  999. }
  1000. static inline long
  1001. put_tv32 (struct compat_timeval __user *o, struct timeval *i)
  1002. {
  1003. return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
  1004. (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec)));
  1005. }
  1006. asmlinkage unsigned long
  1007. sys32_alarm (unsigned int seconds)
  1008. {
  1009. return alarm_setitimer(seconds);
  1010. }
  1011. /* Translations due to time_t size differences, which affect all
  1012. sorts of things, like timeval and itimerval. */
  1013. extern struct timezone sys_tz;
  1014. asmlinkage long
  1015. sys32_gettimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
  1016. {
  1017. if (tv) {
  1018. struct timeval ktv;
  1019. do_gettimeofday(&ktv);
  1020. if (put_tv32(tv, &ktv))
  1021. return -EFAULT;
  1022. }
  1023. if (tz) {
  1024. if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
  1025. return -EFAULT;
  1026. }
  1027. return 0;
  1028. }
  1029. asmlinkage long
  1030. sys32_settimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
  1031. {
  1032. struct timeval ktv;
  1033. struct timespec kts;
  1034. struct timezone ktz;
  1035. if (tv) {
  1036. if (get_tv32(&ktv, tv))
  1037. return -EFAULT;
  1038. kts.tv_sec = ktv.tv_sec;
  1039. kts.tv_nsec = ktv.tv_usec * 1000;
  1040. }
  1041. if (tz) {
  1042. if (copy_from_user(&ktz, tz, sizeof(ktz)))
  1043. return -EFAULT;
  1044. }
  1045. return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
  1046. }
  1047. struct getdents32_callback {
  1048. struct compat_dirent __user *current_dir;
  1049. struct compat_dirent __user *previous;
  1050. int count;
  1051. int error;
  1052. };
  1053. struct readdir32_callback {
  1054. struct old_linux32_dirent __user * dirent;
  1055. int count;
  1056. };
  1057. static int
  1058. filldir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino,
  1059. unsigned int d_type)
  1060. {
  1061. struct compat_dirent __user * dirent;
  1062. struct getdents32_callback * buf = (struct getdents32_callback *) __buf;
  1063. int reclen = ROUND_UP(offsetof(struct compat_dirent, d_name) + namlen + 1, 4);
  1064. buf->error = -EINVAL; /* only used if we fail.. */
  1065. if (reclen > buf->count)
  1066. return -EINVAL;
  1067. buf->error = -EFAULT; /* only used if we fail.. */
  1068. dirent = buf->previous;
  1069. if (dirent)
  1070. if (put_user(offset, &dirent->d_off))
  1071. return -EFAULT;
  1072. dirent = buf->current_dir;
  1073. buf->previous = dirent;
  1074. if (put_user(ino, &dirent->d_ino)
  1075. || put_user(reclen, &dirent->d_reclen)
  1076. || copy_to_user(dirent->d_name, name, namlen)
  1077. || put_user(0, dirent->d_name + namlen))
  1078. return -EFAULT;
  1079. dirent = (struct compat_dirent __user *) ((char __user *) dirent + reclen);
  1080. buf->current_dir = dirent;
  1081. buf->count -= reclen;
  1082. return 0;
  1083. }
  1084. asmlinkage long
  1085. sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned int count)
  1086. {
  1087. struct file * file;
  1088. struct compat_dirent __user * lastdirent;
  1089. struct getdents32_callback buf;
  1090. int error;
  1091. error = -EBADF;
  1092. file = fget(fd);
  1093. if (!file)
  1094. goto out;
  1095. buf.current_dir = dirent;
  1096. buf.previous = NULL;
  1097. buf.count = count;
  1098. buf.error = 0;
  1099. error = vfs_readdir(file, filldir32, &buf);
  1100. if (error < 0)
  1101. goto out_putf;
  1102. error = buf.error;
  1103. lastdirent = buf.previous;
  1104. if (lastdirent) {
  1105. error = -EINVAL;
  1106. if (put_user(file->f_pos, &lastdirent->d_off))
  1107. goto out_putf;
  1108. error = count - buf.count;
  1109. }
  1110. out_putf:
  1111. fput(file);
  1112. out:
  1113. return error;
  1114. }
  1115. static int
  1116. fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
  1117. unsigned int d_type)
  1118. {
  1119. struct readdir32_callback * buf = (struct readdir32_callback *) __buf;
  1120. struct old_linux32_dirent __user * dirent;
  1121. if (buf->count)
  1122. return -EINVAL;
  1123. buf->count++;
  1124. dirent = buf->dirent;
  1125. if (put_user(ino, &dirent->d_ino)
  1126. || put_user(offset, &dirent->d_offset)
  1127. || put_user(namlen, &dirent->d_namlen)
  1128. || copy_to_user(dirent->d_name, name, namlen)
  1129. || put_user(0, dirent->d_name + namlen))
  1130. return -EFAULT;
  1131. return 0;
  1132. }
  1133. asmlinkage long
  1134. sys32_readdir (unsigned int fd, void __user *dirent, unsigned int count)
  1135. {
  1136. int error;
  1137. struct file * file;
  1138. struct readdir32_callback buf;
  1139. error = -EBADF;
  1140. file = fget(fd);
  1141. if (!file)
  1142. goto out;
  1143. buf.count = 0;
  1144. buf.dirent = dirent;
  1145. error = vfs_readdir(file, fillonedir32, &buf);
  1146. if (error >= 0)
  1147. error = buf.count;
  1148. fput(file);
  1149. out:
  1150. return error;
  1151. }
  1152. struct sel_arg_struct {
  1153. unsigned int n;
  1154. unsigned int inp;
  1155. unsigned int outp;
  1156. unsigned int exp;
  1157. unsigned int tvp;
  1158. };
  1159. asmlinkage long
  1160. sys32_old_select (struct sel_arg_struct __user *arg)
  1161. {
  1162. struct sel_arg_struct a;
  1163. if (copy_from_user(&a, arg, sizeof(a)))
  1164. return -EFAULT;
  1165. return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
  1166. compat_ptr(a.exp), compat_ptr(a.tvp));
  1167. }
  1168. #define SEMOP 1
  1169. #define SEMGET 2
  1170. #define SEMCTL 3
  1171. #define SEMTIMEDOP 4
  1172. #define MSGSND 11
  1173. #define MSGRCV 12
  1174. #define MSGGET 13
  1175. #define MSGCTL 14
  1176. #define SHMAT 21
  1177. #define SHMDT 22
  1178. #define SHMGET 23
  1179. #define SHMCTL 24
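/*
 * Note (added): the i386 ipc(2) syscall multiplexes all SysV IPC operations
 * through one entry point; the constants above select the operation, and
 * sys32_ipc() below fans each one out to the corresponding compat or native
 * handler. The 'version' extracted from the top 16 bits of 'call' is a
 * historical i386 ABI quirk.
 */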
  1180. asmlinkage long
  1181. sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
  1182. {
  1183. int version;
  1184. version = call >> 16; /* hack for backward compatibility */
  1185. call &= 0xffff;
  1186. switch (call) {
  1187. case SEMTIMEDOP:
  1188. if (fifth)
  1189. return compat_sys_semtimedop(first, compat_ptr(ptr),
  1190. second, compat_ptr(fifth));
  1191. /* else fall through for normal semop() */
  1192. case SEMOP:
  1193. /* struct sembuf is the same on 32 and 64bit :)) */
  1194. return sys_semtimedop(first, compat_ptr(ptr), second,
  1195. NULL);
  1196. case SEMGET:
  1197. return sys_semget(first, second, third);
  1198. case SEMCTL:
  1199. return compat_sys_semctl(first, second, third, compat_ptr(ptr));
  1200. case MSGSND:
  1201. return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
  1202. case MSGRCV:
  1203. return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr));
  1204. case MSGGET:
  1205. return sys_msgget((key_t) first, second);
  1206. case MSGCTL:
  1207. return compat_sys_msgctl(first, second, compat_ptr(ptr));
  1208. case SHMAT:
  1209. return compat_sys_shmat(first, second, third, version, compat_ptr(ptr));
  1210. break;
  1211. case SHMDT:
  1212. return sys_shmdt(compat_ptr(ptr));
  1213. case SHMGET:
  1214. return sys_shmget(first, (unsigned)second, third);
  1215. case SHMCTL:
  1216. return compat_sys_shmctl(first, second, compat_ptr(ptr));
  1217. default:
  1218. return -ENOSYS;
  1219. }
  1220. return -EINVAL;
  1221. }
  1222. asmlinkage long
  1223. compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options,
  1224. struct compat_rusage *ru);
  1225. asmlinkage long
  1226. sys32_waitpid (int pid, unsigned int *stat_addr, int options)
  1227. {
  1228. return compat_sys_wait4(pid, stat_addr, options, NULL);
  1229. }
  1230. static unsigned int
  1231. ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
  1232. {
  1233. size_t copied;
  1234. unsigned int ret;
  1235. copied = access_process_vm(child, addr, val, sizeof(*val), 0);
  1236. return (copied != sizeof(ret)) ? -EIO : 0;
  1237. }
  1238. static unsigned int
  1239. ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
  1240. {
  1241. if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
  1242. return -EIO;
  1243. return 0;
  1244. }
  1245. /*
  1246. * The order in which registers are stored in the ptrace regs structure
  1247. */
  1248. #define PT_EBX 0
  1249. #define PT_ECX 1
  1250. #define PT_EDX 2
  1251. #define PT_ESI 3
  1252. #define PT_EDI 4
  1253. #define PT_EBP 5
  1254. #define PT_EAX 6
  1255. #define PT_DS 7
  1256. #define PT_ES 8
  1257. #define PT_FS 9
  1258. #define PT_GS 10
  1259. #define PT_ORIG_EAX 11
  1260. #define PT_EIP 12
  1261. #define PT_CS 13
  1262. #define PT_EFL 14
  1263. #define PT_UESP 15
  1264. #define PT_SS 16
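/*
 * Mapping sketch (derived from getreg()/putreg() below): under IA-32
 * emulation the i386 registers live in fixed IA-64 GRs, so the ptrace word
 * offsets above translate as
 *   EAX->r8  ECX->r9  EDX->r10  EBX->r11
 *   ESP->r12 EBP->r13 ESI->r14  EDI->r15
 * with EIP in cr_iip, EFLAGS in thread.eflag, and the segment registers
 * pinned to __USER_CS/__USER_DS.
 */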
  1265. static unsigned int
  1266. getreg (struct task_struct *child, int regno)
  1267. {
  1268. struct pt_regs *child_regs;
  1269. child_regs = task_pt_regs(child);
  1270. switch (regno / sizeof(int)) {
  1271. case PT_EBX: return child_regs->r11;
  1272. case PT_ECX: return child_regs->r9;
  1273. case PT_EDX: return child_regs->r10;
  1274. case PT_ESI: return child_regs->r14;
  1275. case PT_EDI: return child_regs->r15;
  1276. case PT_EBP: return child_regs->r13;
  1277. case PT_EAX: return child_regs->r8;
  1278. case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
  1279. case PT_EIP: return child_regs->cr_iip;
  1280. case PT_UESP: return child_regs->r12;
  1281. case PT_EFL: return child->thread.eflag;
  1282. case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
  1283. return __USER_DS;
  1284. case PT_CS: return __USER_CS;
  1285. default:
  1286. printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
  1287. break;
  1288. }
  1289. return 0;
  1290. }
  1291. static void
  1292. putreg (struct task_struct *child, int regno, unsigned int value)
  1293. {
  1294. struct pt_regs *child_regs;
  1295. child_regs = task_pt_regs(child);
  1296. switch (regno / sizeof(int)) {
  1297. case PT_EBX: child_regs->r11 = value; break;
  1298. case PT_ECX: child_regs->r9 = value; break;
  1299. case PT_EDX: child_regs->r10 = value; break;
  1300. case PT_ESI: child_regs->r14 = value; break;
  1301. case PT_EDI: child_regs->r15 = value; break;
  1302. case PT_EBP: child_regs->r13 = value; break;
  1303. case PT_EAX: child_regs->r8 = value; break;
  1304. case PT_ORIG_EAX: child_regs->r1 = value; break;
  1305. case PT_EIP: child_regs->cr_iip = value; break;
  1306. case PT_UESP: child_regs->r12 = value; break;
  1307. case PT_EFL: child->thread.eflag = value; break;
  1308. case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
  1309. if (value != __USER_DS)
  1310. printk(KERN_ERR
  1311. "ia32.putreg: attempt to set invalid segment register %d = %x\n",
  1312. regno, value);
  1313. break;
  1314. case PT_CS:
  1315. if (value != __USER_CS)
  1316. printk(KERN_ERR
  1317. "ia32.putreg: attempt to set invalid segment register %d = %x\n",
  1318. regno, value);
  1319. break;
  1320. default:
  1321. printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
  1322. break;
  1323. }
  1324. }
  1325. static void
  1326. put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
  1327. struct switch_stack *swp, int tos)
  1328. {
  1329. struct _fpreg_ia32 *f;
  1330. char buf[32];
  1331. f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
  1332. if ((regno += tos) >= 8)
  1333. regno -= 8;
  1334. switch (regno) {
  1335. case 0:
  1336. ia64f2ia32f(f, &ptp->f8);
  1337. break;
  1338. case 1:
  1339. ia64f2ia32f(f, &ptp->f9);
  1340. break;
  1341. case 2:
  1342. ia64f2ia32f(f, &ptp->f10);
  1343. break;
  1344. case 3:
  1345. ia64f2ia32f(f, &ptp->f11);
  1346. break;
  1347. case 4:
  1348. case 5:
  1349. case 6:
  1350. case 7:
  1351. ia64f2ia32f(f, &swp->f12 + (regno - 4));
  1352. break;
  1353. }
  1354. copy_to_user(reg, f, sizeof(*reg));
  1355. }
  1356. static void
  1357. get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
  1358. struct switch_stack *swp, int tos)
  1359. {
  1360. if ((regno += tos) >= 8)
  1361. regno -= 8;
  1362. switch (regno) {
  1363. case 0:
  1364. copy_from_user(&ptp->f8, reg, sizeof(*reg));
  1365. break;
  1366. case 1:
  1367. copy_from_user(&ptp->f9, reg, sizeof(*reg));
  1368. break;
  1369. case 2:
  1370. copy_from_user(&ptp->f10, reg, sizeof(*reg));
  1371. break;
  1372. case 3:
  1373. copy_from_user(&ptp->f11, reg, sizeof(*reg));
  1374. break;
  1375. case 4:
  1376. case 5:
  1377. case 6:
  1378. case 7:
  1379. copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
  1380. break;
  1381. }
  1382. return;
  1383. }

int
save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;

	if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
		return -EFAULT;

	__put_user(tsk->thread.fcr & 0xffff, &save->cwd);
	__put_user(tsk->thread.fsr & 0xffff, &save->swd);
	__put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
	__put_user(tsk->thread.fir, &save->fip);
	__put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
	__put_user(tsk->thread.fdr, &save->foo);
	__put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

	/*
	 *  Stack frames start with 16-bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = task_pt_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;
	for (i = 0; i < 8; i++)
		put_fpreg(i, &save->st_space[i], ptp, swp, tos);
	return 0;
}
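
/*
 * Counterpart of save_ia32_fpstate(): merge an ia32_user_i387_struct
 * supplied by the tracer back into the 64-bit fcr/fsr/fir/fdr fields
 * and the per-register save area (IA32_PTRACE_SETFPREGS).
 */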

static int
restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;
	unsigned int fsrlo, fsrhi, num32;

	if (!access_ok(VERIFY_READ, save, sizeof(*save)))
		return(-EFAULT);

	__get_user(num32, (unsigned int __user *)&save->cwd);
	tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
	__get_user(fsrlo, (unsigned int __user *)&save->swd);
	__get_user(fsrhi, (unsigned int __user *)&save->twd);
	num32 = (fsrhi << 16) | fsrlo;
	tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->fip);
	tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->foo);
	tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

	/*
	 *  Stack frames start with 16-bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = task_pt_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;
	for (i = 0; i < 8; i++)
		get_fpreg(i, &save->st_space[i], ptp, swp, tos);
	return 0;
}
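
/*
 * Like save_ia32_fpstate(), but in FXSR layout: the st() registers are
 * stored on 16-byte boundaries, and the MXCSR word plus the eight XMM
 * registers (kept in f16-f31 of the switch_stack) are included
 * (IA32_PTRACE_GETFPXREGS).
 */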

int
save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;
	unsigned long mxcsr = 0;
	unsigned long num128[2];

	if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
		return -EFAULT;

	__put_user(tsk->thread.fcr & 0xffff, &save->cwd);
	__put_user(tsk->thread.fsr & 0xffff, &save->swd);
	__put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
	__put_user(tsk->thread.fir, &save->fip);
	__put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
	__put_user(tsk->thread.fdr, &save->foo);
	__put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

	/*
	 *  Stack frames start with 16-bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = task_pt_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;
	for (i = 0; i < 8; i++)
		put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

	mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
	__put_user(mxcsr & 0xffff, &save->mxcsr);
	for (i = 0; i < 8; i++) {
		memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
		memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
		copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
	}
	return 0;
}
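
/*
 * Counterpart of save_ia32_fpxstate(): load the control/status words,
 * the st() registers, MXCSR and the XMM registers back from an
 * FXSR-format image (IA32_PTRACE_SETFPXREGS).
 */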

static int
restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;
	unsigned int fsrlo, fsrhi, num32;
	int mxcsr;
	unsigned long num64;
	unsigned long num128[2];

	if (!access_ok(VERIFY_READ, save, sizeof(*save)))
		return(-EFAULT);

	__get_user(num32, (unsigned int __user *)&save->cwd);
	tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
	__get_user(fsrlo, (unsigned int __user *)&save->swd);
	__get_user(fsrhi, (unsigned int __user *)&save->twd);
	num32 = (fsrhi << 16) | fsrlo;
	tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->fip);
	tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->foo);
	tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

	/*
	 *  Stack frames start with 16-bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = task_pt_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;
	for (i = 0; i < 8; i++)
		get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

	__get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
	num64 = mxcsr & 0xff10;
	tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32);
	num64 = mxcsr & 0x3f;
	tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32);

	for (i = 0; i < 8; i++) {
		copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
		memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
		memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
	}
	return 0;
}
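
/*
 * ptrace() entry point for ia32 tracers.  Requests that only shuffle
 * 32-bit data (PEEK/POKE, register and FP-state access) are handled
 * here; control requests such as ATTACH, CONT and DETACH are forwarded
 * to the native sys_ptrace().
 */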

asmlinkage long
sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
{
	struct task_struct *child;
	unsigned int value, tmp;
	long i, ret;

	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = sys_ptrace(request, pid, addr, data);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:	/* read word at location addr */
		ret = ia32_peek(child, addr, &value);
		if (ret == 0)
			ret = put_user(value, (unsigned int __user *) compat_ptr(data));
		else
			ret = -EIO;
		goto out_tsk;
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:	/* write the word at location addr */
		ret = ia32_poke(child, addr, data);
		goto out_tsk;
	case PTRACE_PEEKUSR:	/* read word at addr in USER area */
		ret = -EIO;
		if ((addr & 3) || addr > 17*sizeof(int))
			break;
		tmp = getreg(child, addr);
		if (!put_user(tmp, (unsigned int __user *) compat_ptr(data)))
			ret = 0;
		break;
	case PTRACE_POKEUSR:	/* write word at addr in USER area */
		ret = -EIO;
		if ((addr & 3) || addr > 17*sizeof(int))
			break;
		putreg(child, addr, data);
		ret = 0;
		break;
	case IA32_PTRACE_GETREGS:
		if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) {
			ret = -EIO;
			break;
		}
		for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
			put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data));
			data += sizeof(int);
		}
		ret = 0;
		break;
	case IA32_PTRACE_SETREGS:
		if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) {
			ret = -EIO;
			break;
		}
		for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
			get_user(tmp, (unsigned int __user *) compat_ptr(data));
			putreg(child, i, tmp);
			data += sizeof(int);
		}
		ret = 0;
		break;
	case IA32_PTRACE_GETFPREGS:
		ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
					compat_ptr(data));
		break;
	case IA32_PTRACE_GETFPXREGS:
		ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
					 compat_ptr(data));
		break;
	case IA32_PTRACE_SETFPREGS:
		ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
					   compat_ptr(data));
		break;
	case IA32_PTRACE_SETFPXREGS:
		ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
					    compat_ptr(data));
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data));
		break;
	case PTRACE_SYSCALL:	/* continue, stop after next syscall */
	case PTRACE_CONT:	/* restart after signal. */
	case PTRACE_KILL:
	case PTRACE_SINGLESTEP:	/* execute child for one instruction */
	case PTRACE_DETACH:	/* detach a process */
		ret = sys_ptrace(request, pid, addr, data);
		break;
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
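
/*
 * sigaltstack() for ia32 tasks.  The 32-bit stack_t is converted to the
 * native layout; MINSIGSTKSZ differs between ia32 and ia64, so the size
 * check is done against MINSIGSTKSZ_IA32 and the user-requested size is
 * written back into sas_ss_size afterwards.
 */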

typedef struct {
	unsigned int	ss_sp;
	unsigned int	ss_flags;
	unsigned int	ss_size;
} ia32_stack_t;

asmlinkage long
sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
		   long arg2, long arg3, long arg4, long arg5, long arg6,
		   long arg7, struct pt_regs pt)
{
	stack_t uss, uoss;
	ia32_stack_t buf32;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (uss32) {
		if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
			return -EFAULT;
		uss.ss_sp = (void __user *) (long) buf32.ss_sp;
		uss.ss_flags = buf32.ss_flags;
		/* MINSIGSTKSZ is different for ia32 vs ia64.  We lie here to pass the
		   check and set it to the user requested value later */
		if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
			ret = -ENOMEM;
			goto out;
		}
		uss.ss_size = MINSIGSTKSZ;
	}
	set_fs(KERNEL_DS);
	ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
			     (stack_t __user *) &uoss, pt.r12);
	/* only commit the user-requested size if a new stack was actually set */
	if (uss32 && !ret)
		current->sas_ss_size = buf32.ss_size;
	set_fs(old_fs);
out:
	if (ret < 0)
		return(ret);
	if (uoss32) {
		buf32.ss_sp = (long __user) uoss.ss_sp;
		buf32.ss_flags = uoss.ss_flags;
		buf32.ss_size = uoss.ss_size;
		if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
			return -EFAULT;
	}
	return ret;
}

asmlinkage int
sys32_pause (void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
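
/*
 * msync() for ia32: the ia32 ABI only requires 4 KB alignment, so the
 * start address is rounded down to the native page boundary and the
 * length extended by the amount that was trimmed off.
 */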

asmlinkage int
sys32_msync (unsigned int start, unsigned int len, int flags)
{
	unsigned int addr;

	if (OFFSET4K(start))
		return -EINVAL;
	addr = PAGE_START(start);
	return sys_msync(addr, len + (start - addr), flags);
}
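
/*
 * 32-bit layout of the sysctl() argument block: every pointer member of
 * the native argument structure is a 32-bit user address here and has
 * to be widened with compat_ptr() before use.
 */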

struct sysctl32 {
	unsigned int	name;
	int		nlen;
	unsigned int	oldval;
	unsigned int	oldlenp;
	unsigned int	newval;
	unsigned int	newlen;
	unsigned int	__unused[4];
};

#ifdef CONFIG_SYSCTL
asmlinkage long
sys32_sysctl (struct sysctl32 __user *args)
{
	struct sysctl32 a32;
	mm_segment_t old_fs = get_fs();
	void __user *oldvalp, *newvalp;
	size_t oldlen;
	int __user *namep;
	long ret;

	if (copy_from_user(&a32, args, sizeof(a32)))
		return -EFAULT;

	/*
	 * We need to pre-validate these because we have to disable address checking
	 * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
	 * user specifying bad addresses here.  Well, since we're dealing with 32 bit
	 * addresses, we KNOW that access_ok() will always succeed, so this is an
	 * expensive NOP, but so what...
	 */
	namep = (int __user *) compat_ptr(a32.name);
	oldvalp = compat_ptr(a32.oldval);
	newvalp = compat_ptr(a32.newval);

	if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
	    || !access_ok(VERIFY_WRITE, namep, 0)
	    || !access_ok(VERIFY_WRITE, oldvalp, 0)
	    || !access_ok(VERIFY_WRITE, newvalp, 0))
		return -EFAULT;

	set_fs(KERNEL_DS);
	lock_kernel();
	ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen,
			newvalp, (size_t) a32.newlen);
	unlock_kernel();
	set_fs(old_fs);

	if (oldvalp && put_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
		return -EFAULT;

	return ret;
}
#endif

asmlinkage long
sys32_newuname (struct new_utsname __user *name)
{
	int ret = sys_newuname(name);

	if (!ret)
		if (copy_to_user(name->machine, "i686\0\0\0", 8))
			ret = -EFAULT;
	return ret;
}

asmlinkage long
sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid)
{
	uid_t a, b, c;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c);
	set_fs(old_fs);

	if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
		return -EFAULT;
	return ret;
}

asmlinkage long
sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
{
	gid_t a, b, c;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c);
	set_fs(old_fs);

	if (ret)
		return ret;

	return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
}

asmlinkage long
sys32_lseek (unsigned int fd, int offset, unsigned int whence)
{
	/* Sign-extension of "offset" is important here... */
	return sys_lseek(fd, offset, whence);
}
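
/*
 * Helpers for the 16-bit group-list syscalls: convert between the
 * kernel's group_info (full-width gid_t entries) and the legacy array
 * of 16-bit group IDs used by the old ia32 ABI.
 */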

static int
groups16_to_user(short __user *grouplist, struct group_info *group_info)
{
	int i;
	short group;

	for (i = 0; i < group_info->ngroups; i++) {
		group = (short)GROUP_AT(group_info, i);
		if (put_user(group, grouplist+i))
			return -EFAULT;
	}
	return 0;
}

static int
groups16_from_user(struct group_info *group_info, short __user *grouplist)
{
	int i;
	short group;

	for (i = 0; i < group_info->ngroups; i++) {
		if (get_user(group, grouplist+i))
			return -EFAULT;
		GROUP_AT(group_info, i) = (gid_t)group;
	}
	return 0;
}

asmlinkage long
sys32_getgroups16 (int gidsetsize, short __user *grouplist)
{
	int i;

	if (gidsetsize < 0)
		return -EINVAL;

	get_group_info(current->group_info);
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups16_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	put_group_info(current->group_info);
	return i;
}

asmlinkage long
sys32_setgroups16 (int gidsetsize, short __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups16_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
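
/*
 * truncate64()/ftruncate64(): the 64-bit length arrives from the 32-bit
 * side as two 32-bit halves (low word first, as the argument order
 * shows); e.g. a length of 0x100000004 arrives as len_lo = 0x00000004,
 * len_hi = 0x00000001.
 */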

asmlinkage long
sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
{
	return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo);
}

asmlinkage long
sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
{
	return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
}

static int
putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf)
{
	int err;
	u64 hdev;

	if (clear_user(ubuf, sizeof(*ubuf)))
		return -EFAULT;

	hdev = huge_encode_dev(kbuf->dev);
	err  = __put_user(hdev, (u32 __user*)&ubuf->st_dev);
	err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1);
	err |= __put_user(kbuf->ino, &ubuf->__st_ino);
	err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
	err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
	err |= __put_user(kbuf->mode, &ubuf->st_mode);
	err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
	err |= __put_user(kbuf->uid, &ubuf->st_uid);
	err |= __put_user(kbuf->gid, &ubuf->st_gid);

	hdev = huge_encode_dev(kbuf->rdev);
	err |= __put_user(hdev, (u32 __user*)&ubuf->st_rdev);
	err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1);
	err |= __put_user(kbuf->size, &ubuf->st_size_lo);
	err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
	err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
	err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
	err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
	err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
	err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
	err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
	err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
	err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
	return err;
}

asmlinkage long
sys32_stat64 (char __user *filename, struct stat64 __user *statbuf)
{
	struct kstat s;
	long ret = vfs_stat(filename, &s);

	if (!ret)
		ret = putstat64(statbuf, &s);
	return ret;
}

asmlinkage long
sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf)
{
	struct kstat s;
	long ret = vfs_lstat(filename, &s);

	if (!ret)
		ret = putstat64(statbuf, &s);
	return ret;
}

asmlinkage long
sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf)
{
	struct kstat s;
	long ret = vfs_fstat(fd, &s);

	if (!ret)
		ret = putstat64(statbuf, &s);
	return ret;
}
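
/*
 * sysinfo() for ia32: the native struct sysinfo is fetched with the
 * address limit raised, and if any memory counter overflows 32 bits the
 * mem_unit is scaled up (and the counters shifted down) until the
 * values fit into the u32 fields of struct sysinfo32.
 */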

struct sysinfo32 {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[8];
};

asmlinkage long
sys32_sysinfo (struct sysinfo32 __user *info)
{
	struct sysinfo s;
	long ret, err;
	int bitcount = 0;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sysinfo((struct sysinfo __user *) &s);
	set_fs(old_fs);
	/* Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}
		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
		return -EFAULT;

	err  = __put_user(s.uptime, &info->uptime);
	err |= __put_user(s.loads[0], &info->loads[0]);
	err |= __put_user(s.loads[1], &info->loads[1]);
	err |= __put_user(s.loads[2], &info->loads[2]);
	err |= __put_user(s.totalram, &info->totalram);
	err |= __put_user(s.freeram, &info->freeram);
	err |= __put_user(s.sharedram, &info->sharedram);
	err |= __put_user(s.bufferram, &info->bufferram);
	err |= __put_user(s.totalswap, &info->totalswap);
	err |= __put_user(s.freeswap, &info->freeswap);
	err |= __put_user(s.procs, &info->procs);
	err |= __put_user(s.totalhigh, &info->totalhigh);
	err |= __put_user(s.freehigh, &info->freehigh);
	err |= __put_user(s.mem_unit, &info->mem_unit);
	if (err)
		return -EFAULT;
	return ret;
}

asmlinkage long
sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval)
{
	mm_segment_t old_fs = get_fs();
	struct timespec t;
	long ret;

	set_fs(KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
	set_fs(old_fs);
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}

asmlinkage long
sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
	return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}

asmlinkage long
sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
	return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}
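
/*
 * sendfile() wrapper: the 32-bit caller passes a pointer to a 32-bit
 * offset, so the value is staged in a kernel off_t (with the address
 * limit raised around the native call) and written back afterwards.
 */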

asmlinkage long
sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count)
{
	mm_segment_t old_fs = get_fs();
	long ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count);
	set_fs(old_fs);

	if (offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}

asmlinkage long
sys32_personality (unsigned int personality)
{
	long ret;

	if (current->personality == PER_LINUX32 && personality == PER_LINUX)
		personality = PER_LINUX32;
	ret = sys_personality(personality);
	if (ret == PER_LINUX32)
		ret = PER_LINUX;
	return ret;
}
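
/*
 * brk() wrapper: if the break was moved down, the remainder of the last
 * partial page above the new break is cleared, presumably so that stale
 * data is not visible when the heap later grows back into it.
 */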

asmlinkage unsigned long
sys32_brk (unsigned int brk)
{
	unsigned long ret, obrk;
	struct mm_struct *mm = current->mm;

	obrk = mm->brk;
	ret = sys_brk(brk);
	if (ret < obrk)
		clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret);
	return ret;
}

/* Structure for ia32 emulation on ia64 */
struct epoll_event32
{
	u32 events;
	u32 data[2];
};
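
/*
 * In the ia32 ABI the 64-bit "data" member of struct epoll_event is
 * only 4-byte aligned, so it is represented here as two u32 words.
 * The wrappers below translate between this layout and the native
 * struct epoll_event.
 */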

asmlinkage long
sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event)
{
	mm_segment_t old_fs = get_fs();
	struct epoll_event event64;
	int error;
	u32 data_halfword;

	if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32)))
		return -EFAULT;

	__get_user(event64.events, &event->events);
	__get_user(data_halfword, &event->data[0]);
	event64.data = data_halfword;
	__get_user(data_halfword, &event->data[1]);
	event64.data |= (u64)data_halfword << 32;

	set_fs(KERNEL_DS);
	error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64);
	set_fs(old_fs);

	return error;
}

asmlinkage long
sys32_epoll_wait(int epfd, struct epoll_event32 __user *events, int maxevents,
		 int timeout)
{
	struct epoll_event *events64 = NULL;
	mm_segment_t old_fs = get_fs();
	int numevents, size;
	int evt_idx;
	int do_free_pages = 0;

	if (maxevents <= 0) {
		return -EINVAL;
	}

	/* Verify that the area passed by the user is writeable */
	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32)))
		return -EFAULT;

	/*
	 * Allocate space for the intermediate copy.  If the space needed
	 * is large enough to cause kmalloc to fail, then try again with
	 * __get_free_pages.
	 */
	size = maxevents * sizeof(struct epoll_event);
	events64 = kmalloc(size, GFP_KERNEL);
	if (events64 == NULL) {
		events64 = (struct epoll_event *)
				__get_free_pages(GFP_KERNEL, get_order(size));
		if (events64 == NULL)
			return -ENOMEM;
		do_free_pages = 1;
	}

	/* Do the system call */
	set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem */
	numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64,
				   maxevents, timeout);
	set_fs(old_fs);

	/* Don't modify userspace memory if we're returning an error */
	if (numevents > 0) {
		/* Translate the 64-bit structures back into the 32-bit
		   structures */
		for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
			__put_user(events64[evt_idx].events,
				   &events[evt_idx].events);
			__put_user((u32)events64[evt_idx].data,
				   &events[evt_idx].data[0]);
			__put_user((u32)(events64[evt_idx].data >> 32),
				   &events[evt_idx].data[1]);
		}
	}

	if (do_free_pages)
		free_pages((unsigned long) events64, get_order(size));
	else
		kfree(events64);
	return numevents;
}

/*
 * Get a yet unused TLS descriptor index.
 */
static int
get_free_idx (void)
{
	struct thread_struct *t = &current->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (desc_empty(t->tls_array + idx))
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int
sys32_set_thread_area (struct ia32_user_desc __user *u_info)
{
	struct thread_struct *t = &current->thread;
	struct ia32_user_desc info;
	struct desc_struct *desc;
	int cpu, idx;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;
	idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and allocate an empty descriptor:
	 */
	if (idx == -1) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
	cpu = smp_processor_id();

	if (LDT_empty(&info)) {
		desc->a = 0;
		desc->b = 0;
	} else {
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}
	load_TLS(t, cpu);
	return 0;
}

/*
 * Get the current Thread-Local Storage area:
 */
#define GET_BASE(desc) (			\
	(((desc)->a >> 16) & 0x0000ffff) |	\
	(((desc)->b << 16) & 0x00ff0000) |	\
	( (desc)->b & 0xff000000) )

#define GET_LIMIT(desc) (			\
	((desc)->a & 0x0ffff) |			\
	((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >> 9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)

asmlinkage int
sys32_get_thread_area (struct ia32_user_desc __user *u_info)
{
	struct ia32_user_desc info;
	struct desc_struct *desc;
	int idx;

	if (get_user(idx, &u_info->entry_number))
		return -EFAULT;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

	info.entry_number = idx;
	info.base_addr = GET_BASE(desc);
	info.limit = GET_LIMIT(desc);
	info.seg_32bit = GET_32BIT(desc);
	info.contents = GET_CONTENTS(desc);
	info.read_exec_only = !GET_WRITABLE(desc);
	info.limit_in_pages = GET_LIMIT_PAGES(desc);
	info.seg_not_present = !GET_PRESENT(desc);
	info.useable = GET_USEABLE(desc);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
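
/*
 * fadvise64_64(): as with the other 64-bit-offset wrappers, offset and
 * length each arrive as two 32-bit halves and are reassembled before
 * calling the native sys_fadvise64_64().
 */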

long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
			__u32 len_low, __u32 len_high, int advice)
{
	return sys_fadvise64_64(fd,
				(((u64)offset_high)<<32) | offset_low,
				(((u64)len_high)<<32) | len_low,
				advice);
}

#ifdef NOTYET	/* UNTESTED FOR IA64 FROM HERE DOWN */

asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
{
	uid_t sruid, seuid;

	sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
	seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
	return sys_setreuid(sruid, seuid);
}

asmlinkage long
sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
		compat_uid_t suid)
{
	uid_t sruid, seuid, ssuid;

	sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
	seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
	ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
	return sys_setresuid(sruid, seuid, ssuid);
}

asmlinkage long
sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
{
	gid_t srgid, segid;

	srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
	segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
	return sys_setregid(srgid, segid);
}

asmlinkage long
sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
		compat_gid_t sgid)
{
	gid_t srgid, segid, ssgid;

	srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
	segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
	ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
	return sys_setresgid(srgid, segid, ssgid);
}
#endif /* NOTYET */