
/*
 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
 *
 * Copyright (C) 2000 VA Linux Co
 * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2004 Gordon Jin <gordon.jin@intel.com>
 *
 * These routines maintain argument size conversion between 32bit and 64bit
 * environment.
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/socket.h>
#include <linux/quota.h>
#include <linux/poll.h>
#include <linux/eventpoll.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/stat.h>
#include <linux/ipc.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/mman.h>
#include <linux/mutex.h>

#include <asm/intrinsics.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#include "ia32priv.h"

#include <net/scm.h>
#include <net/sock.h>

#define DEBUG	0

#if DEBUG
# define DBG(fmt...)	printk(KERN_DEBUG fmt)
#else
# define DBG(fmt...)
#endif

#define ROUND_UP(x,a)		((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
#define OFFSET4K(a)		((a) & 0xfff)
#define PAGE_START(addr)	((addr) & PAGE_MASK)
#define MINSIGSTKSZ_IA32	2048

#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))
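/*
 * Quick illustration (added note, not in the original source): with 4KB IA-32
 * pages, OFFSET4K(0x12345) == 0x345 and ROUND_UP(0x12345, 4) == 0x12348, while
 * high2lowuid() clamps any uid above 65535 to the overflow value 65534 so it
 * still fits the 16-bit fields of the old ia32 ABI structures.
 */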
/*
 * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore
 * while doing so.
 */
/* XXX make per-mm: */
static DEFINE_MUTEX(ia32_mmap_mutex);

asmlinkage long
sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
	      struct pt_regs *regs)
{
	long error;
	char *filename;
	unsigned long old_map_base, old_task_size, tssd;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;

	old_map_base  = current->thread.map_base;
	old_task_size = current->thread.task_size;
	tssd = ia64_get_kr(IA64_KR_TSSD);

	/* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
	current->thread.map_base  = DEFAULT_MAP_BASE;
	current->thread.task_size = DEFAULT_TASK_SIZE;
	ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
	ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);

	error = compat_do_execve(filename, argv, envp, regs);
	putname(filename);

	if (error < 0) {
		/* oops, execve failed, switch back to old values... */
		ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
		ia64_set_kr(IA64_KR_TSSD, tssd);
		current->thread.map_base  = old_map_base;
		current->thread.task_size = old_task_size;
	}

	return error;
}

int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	compat_ino_t ino;
	int err;

	if ((u64) stat->size > MAX_NON_LFS ||
	    !old_valid_dev(stat->dev) ||
	    !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	ino = stat->ino;
	if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino)
		return -EOVERFLOW;

	if (clear_user(ubuf, sizeof(*ubuf)))
		return -EFAULT;

	err  = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev);
	err |= __put_user(ino, &ubuf->st_ino);
	err |= __put_user(stat->mode, &ubuf->st_mode);
	err |= __put_user(stat->nlink, &ubuf->st_nlink);
	err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid);
	err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
	err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev);
	err |= __put_user(stat->size, &ubuf->st_size);
	err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
	err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
	err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
	err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
	err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
	err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
	err |= __put_user(stat->blksize, &ubuf->st_blksize);
	err |= __put_user(stat->blocks, &ubuf->st_blocks);
	return err;
}
#if PAGE_SHIFT > IA32_PAGE_SHIFT

static int
get_page_prot (struct vm_area_struct *vma, unsigned long addr)
{
	int prot = 0;

	if (!vma || vma->vm_start > addr)
		return 0;

	if (vma->vm_flags & VM_READ)
		prot |= PROT_READ;
	if (vma->vm_flags & VM_WRITE)
		prot |= PROT_WRITE;
	if (vma->vm_flags & VM_EXEC)
		prot |= PROT_EXEC;
	return prot;
}

/*
 * Map a subpage by creating an anonymous page that contains the union of the old page and
 * the subpage.
 */
static unsigned long
mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
	      loff_t off)
{
	void *page = NULL;
	struct inode *inode;
	unsigned long ret = 0;
	struct vm_area_struct *vma = find_vma(current->mm, start);
	int old_prot = get_page_prot(vma, start);

	DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
	    file, start, end, prot, flags, off);

	/* Optimize the case where the old mmap and the new mmap are both anonymous */
	if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
		if (clear_user((void __user *) start, end - start)) {
			ret = -EFAULT;
			goto out;
		}
		goto skip_mmap;
	}

	page = (void *) get_zeroed_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (old_prot)
		copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE);

	down_write(&current->mm->mmap_sem);
	{
		ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
			      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
	}
	up_write(&current->mm->mmap_sem);

	if (IS_ERR((void *) ret))
		goto out;

	if (old_prot) {
		/* copy back the old page contents. */
		if (offset_in_page(start))
			copy_to_user((void __user *) PAGE_START(start), page,
				     offset_in_page(start));
		if (offset_in_page(end))
			copy_to_user((void __user *) end, page + offset_in_page(end),
				     PAGE_SIZE - offset_in_page(end));
	}

	if (!(flags & MAP_ANONYMOUS)) {
		/* read the file contents */
		inode = file->f_path.dentry->d_inode;
		if (!inode->i_fop || !file->f_op->read
		    || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
		{
			ret = -EINVAL;
			goto out;
		}
	}

 skip_mmap:
	if (!(prot & PROT_WRITE))
		ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
 out:
	if (page)
		free_page((unsigned long) page);
	return ret;
}
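/*
 * Added note (not in the original file): mmap_subpage() is what lets 4KB
 * IA-32 mappings coexist with a larger native page size.  For example, with
 * 16KB IA-64 pages an IA-32 mmap of [0x2000, 0x3000) cannot be mapped
 * directly; instead the surrounding native page is saved, remapped as an
 * anonymous writable page, the preserved head/tail bytes are copied back, the
 * requested file range is read into the middle, and only then are the final
 * protections applied with sys_mprotect().
 */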
/* SLAB cache for ia64_partial_page structures */
struct kmem_cache *ia64_partial_page_cachep;

/*
 * init ia64_partial_page_list.
 * Returns NULL if the kmalloc fails.
 */
struct ia64_partial_page_list*
ia32_init_pp_list(void)
{
	struct ia64_partial_page_list *p;

	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
		return p;
	p->pp_head = NULL;
	p->ppl_rb = RB_ROOT;
	p->pp_hint = NULL;
	atomic_set(&p->pp_count, 1);
	return p;
}

/*
 * Search for the partial page with @start in partial page list @ppl.
 * If the partial page is found, return it.
 * Otherwise return NULL and set @pprev, @rb_link and @rb_parent so they can
 * be used by a later __ia32_insert_pp().
 */
static struct ia64_partial_page *
__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
	struct ia64_partial_page **pprev, struct rb_node ***rb_link,
	struct rb_node **rb_parent)
{
	struct ia64_partial_page *pp;
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	pp = ppl->pp_hint;
	if (pp && pp->base == start)
		return pp;

	__rb_link = &ppl->ppl_rb.rb_node;
	rb_prev = __rb_parent = NULL;

	while (*__rb_link) {
		__rb_parent = *__rb_link;
		pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);

		if (pp->base == start) {
			ppl->pp_hint = pp;
			return pp;
		} else if (pp->base < start) {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		} else {
			__rb_link = &__rb_parent->rb_left;
		}
	}

	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
	return NULL;
}

/*
 * insert @pp into @ppl.
 */
static void
__ia32_insert_pp(struct ia64_partial_page_list *ppl,
	struct ia64_partial_page *pp, struct ia64_partial_page *prev,
	struct rb_node **rb_link, struct rb_node *rb_parent)
{
	/* link list */
	if (prev) {
		pp->next = prev->next;
		prev->next = pp;
	} else {
		ppl->pp_head = pp;
		if (rb_parent)
			pp->next = rb_entry(rb_parent,
				struct ia64_partial_page, pp_rb);
		else
			pp->next = NULL;
	}

	/* link rb */
	rb_link_node(&pp->pp_rb, rb_parent, rb_link);
	rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);

	ppl->pp_hint = pp;
}

/*
 * delete @pp from partial page list @ppl.
 */
static void
__ia32_delete_pp(struct ia64_partial_page_list *ppl,
	struct ia64_partial_page *pp, struct ia64_partial_page *prev)
{
	if (prev) {
		prev->next = pp->next;
		if (ppl->pp_hint == pp)
			ppl->pp_hint = prev;
	} else {
		ppl->pp_head = pp->next;
		if (ppl->pp_hint == pp)
			ppl->pp_hint = pp->next;
	}
	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
	kmem_cache_free(ia64_partial_page_cachep, pp);
}

static struct ia64_partial_page *
__pp_prev(struct ia64_partial_page *pp)
{
	struct rb_node *prev = rb_prev(&pp->pp_rb);

	if (prev)
		return rb_entry(prev, struct ia64_partial_page, pp_rb);
	else
		return NULL;
}

/*
 * Delete partial pages with address between @start and @end.
 * @start and @end are page aligned.
 */
static void
__ia32_delete_pp_range(unsigned int start, unsigned int end)
{
	struct ia64_partial_page *pp, *prev;
	struct rb_node **rb_link, *rb_parent;

	if (start >= end)
		return;

	pp = __ia32_find_pp(current->thread.ppl, start, &prev,
					&rb_link, &rb_parent);
	if (pp)
		prev = __pp_prev(pp);
	else {
		if (prev)
			pp = prev->next;
		else
			pp = current->thread.ppl->pp_head;
	}

	while (pp && pp->base < end) {
		struct ia64_partial_page *tmp = pp->next;
		__ia32_delete_pp(current->thread.ppl, pp, prev);
		pp = tmp;
	}
}

/*
 * Set the range between @start and @end in bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 */
static int
__ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
	struct ia64_partial_page *pp, *prev;
	struct rb_node **rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, i;

	pstart = PAGE_START(start);
	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
	if (end_bit == 0)
		end_bit = PAGE_SIZE / IA32_PAGE_SIZE;

	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
					&rb_link, &rb_parent);
	if (pp) {
		for (i = start_bit; i < end_bit; i++)
			set_bit(i, &pp->bitmap);
		/*
		 * Check: if this partial page has been set to a full page,
		 * then delete it.
		 */
		if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
				PAGE_SIZE/IA32_PAGE_SIZE) {
			__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
		}
		return 0;
	}

	/*
	 * MAP_FIXED may lead to overlapping mmap.
	 * In this case, the requested mmap area may already be mmapped as a
	 * full page.  So check the vma before adding a new partial page.
	 */
	if (flags & MAP_FIXED) {
		struct vm_area_struct *vma = find_vma(current->mm, pstart);
		if (vma && vma->vm_start <= pstart)
			return 0;
	}

	/* allocate a new ia64_partial_page */
	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->base = pstart;
	pp->bitmap = 0;
	for (i = start_bit; i < end_bit; i++)
		set_bit(i, &(pp->bitmap));
	pp->next = NULL;
	__ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
	return 0;
}
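/*
 * Added illustration (assumes a 16KB native page, i.e. PAGE_SIZE/IA32_PAGE_SIZE == 4):
 * the bitmap of a partial page then has four significant bits, one per 4KB
 * IA-32 page.  Mapping [base+0x1000, base+0x3000) sets bits 1 and 2, giving
 * bitmap 0110b; once all four bits are set the partial page is redundant and
 * __ia32_set_pp() drops it from the list.
 */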
/*
 * @start and @end should be IA32 page aligned, but don't need to be in the
 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 * page, then call __ia32_set_pp().
 */
static void
ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
	down_write(&current->mm->mmap_sem);
	if (flags & MAP_FIXED) {
		/*
		 * MAP_FIXED may lead to overlapping mmap.  When this happens,
		 * any complete IA64 pages in the overlapped range make the old
		 * partial pages there obsolete, so delete them.
		 */
		__ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
	}

	if (end < PAGE_ALIGN(start)) {
		__ia32_set_pp(start, end, flags);
	} else {
		if (offset_in_page(start))
			__ia32_set_pp(start, PAGE_ALIGN(start), flags);
		if (offset_in_page(end))
			__ia32_set_pp(PAGE_START(end), end, flags);
	}
	up_write(&current->mm->mmap_sem);
}

/*
 * Unset the range between @start and @end in bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 * After doing so, if the bitmap is 0, free the page and return 1;
 * else return 0.
 * If the partial page is not found in the list, then
 *   if the vma exists, turn the full page into a partial page;
 *   else return -ENOMEM.
 */
static int
__ia32_unset_pp(unsigned int start, unsigned int end)
{
	struct ia64_partial_page *pp, *prev;
	struct rb_node **rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, i;
	struct vm_area_struct *vma;

	pstart = PAGE_START(start);
	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
	if (end_bit == 0)
		end_bit = PAGE_SIZE / IA32_PAGE_SIZE;

	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
					&rb_link, &rb_parent);
	if (pp) {
		for (i = start_bit; i < end_bit; i++)
			clear_bit(i, &pp->bitmap);
		if (pp->bitmap == 0) {
			__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
			return 1;
		}
		return 0;
	}

	vma = find_vma(current->mm, pstart);
	if (!vma || vma->vm_start > pstart) {
		return -ENOMEM;
	}

	/* allocate a new ia64_partial_page */
	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->base = pstart;
	pp->bitmap = 0;
	for (i = 0; i < start_bit; i++)
		set_bit(i, &(pp->bitmap));
	for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++)
		set_bit(i, &(pp->bitmap));
	pp->next = NULL;
	__ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
	return 0;
}

/*
 * Delete pp between PAGE_ALIGN(start) and PAGE_START(end) by calling
 * __ia32_delete_pp_range(). Unset possible partial pages by calling
 * __ia32_unset_pp().
 * For the return value, see __ia32_unset_pp().
 */
static int
ia32_unset_pp(unsigned int *startp, unsigned int *endp)
{
	unsigned int start = *startp, end = *endp;
	int ret = 0;

	down_write(&current->mm->mmap_sem);

	__ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));

	if (end < PAGE_ALIGN(start)) {
		ret = __ia32_unset_pp(start, end);
		if (ret == 1) {
			*startp = PAGE_START(start);
			*endp = PAGE_ALIGN(end);
		}
		if (ret == 0) {
			/* to shortcut sys_munmap() in sys32_munmap() */
			*startp = PAGE_START(start);
			*endp = PAGE_START(end);
		}
	} else {
		if (offset_in_page(start)) {
			ret = __ia32_unset_pp(start, PAGE_ALIGN(start));
			if (ret == 1)
				*startp = PAGE_START(start);
			if (ret == 0)
				*startp = PAGE_ALIGN(start);
			if (ret < 0)
				goto out;
		}
		if (offset_in_page(end)) {
			ret = __ia32_unset_pp(PAGE_START(end), end);
			if (ret == 1)
				*endp = PAGE_ALIGN(end);
			if (ret == 0)
				*endp = PAGE_START(end);
		}
	}

 out:
	up_write(&current->mm->mmap_sem);
	return ret;
}
/*
 * Compare the range between @start and @end with bitmap in partial page.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 */
static int
__ia32_compare_pp(unsigned int start, unsigned int end)
{
	struct ia64_partial_page *pp, *prev;
	struct rb_node **rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, size;
	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */

	pstart = PAGE_START(start);

	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
					&rb_link, &rb_parent);
	if (!pp)
		return 1;

	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
	size = sizeof(pp->bitmap) * 8;
	first_bit = find_first_bit(&pp->bitmap, size);
	next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit);
	if ((start_bit < first_bit) || (end_bit > next_zero_bit)) {
		/* exceeds the first range in bitmap */
		return -ENOMEM;
	} else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) {
		first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit);
		if ((next_zero_bit < first_bit) && (first_bit < size))
			return 1;	/* has next range */
		else
			return 0;	/* no next range */
	} else
		return 1;
}

/*
 * @start and @end should be IA32 page aligned, but don't need to be in the
 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 * page, then call __ia32_compare_pp().
 *
 * Take this as an example: the range is the 1st and 2nd 4K page.
 * Return 0 if they fit the bitmap exactly, i.e. bitmap = 00000011;
 * Return 1 if the range doesn't cover the whole bitmap, e.g. bitmap = 00001111;
 * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or
 * bitmap = 00000101.
 */
static int
ia32_compare_pp(unsigned int *startp, unsigned int *endp)
{
	unsigned int start = *startp, end = *endp;
	int retval = 0;

	down_write(&current->mm->mmap_sem);

	if (end < PAGE_ALIGN(start)) {
		retval = __ia32_compare_pp(start, end);
		if (retval == 0) {
			*startp = PAGE_START(start);
			*endp = PAGE_ALIGN(end);
		}
	} else {
		if (offset_in_page(start)) {
			retval = __ia32_compare_pp(start,
						   PAGE_ALIGN(start));
			if (retval == 0)
				*startp = PAGE_START(start);
			if (retval < 0)
				goto out;
		}
		if (offset_in_page(end)) {
			retval = __ia32_compare_pp(PAGE_START(end), end);
			if (retval == 0)
				*endp = PAGE_ALIGN(end);
		}
	}

 out:
	up_write(&current->mm->mmap_sem);
	return retval;
}
static void
__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
{
	struct ia64_partial_page *pp = ppl->pp_head;

	while (pp) {
		struct ia64_partial_page *next = pp->next;
		kmem_cache_free(ia64_partial_page_cachep, pp);
		pp = next;
	}

	kfree(ppl);
}

void
ia32_drop_ia64_partial_page_list(struct task_struct *task)
{
	struct ia64_partial_page_list *ppl = task->thread.ppl;

	if (ppl && atomic_dec_and_test(&ppl->pp_count))
		__ia32_drop_pp_list(ppl);
}

/*
 * Copy current->thread.ppl to ppl (already initialized).
 */
static int
__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
{
	struct ia64_partial_page *pp, *tmp, *prev;
	struct rb_node **rb_link, *rb_parent;

	ppl->pp_head = NULL;
	ppl->pp_hint = NULL;
	ppl->ppl_rb = RB_ROOT;
	rb_link = &ppl->ppl_rb.rb_node;
	rb_parent = NULL;
	prev = NULL;

	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
		tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		*tmp = *pp;
		__ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent);
		prev = tmp;
		rb_link = &tmp->pp_rb.rb_right;
		rb_parent = &tmp->pp_rb;
	}
	return 0;
}

int
ia32_copy_ia64_partial_page_list(struct task_struct *p,
				unsigned long clone_flags)
{
	int retval = 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&current->thread.ppl->pp_count);
		p->thread.ppl = current->thread.ppl;
	} else {
		p->thread.ppl = ia32_init_pp_list();
		if (!p->thread.ppl)
			return -ENOMEM;
		down_write(&current->mm->mmap_sem);
		{
			retval = __ia32_copy_pp_list(p->thread.ppl);
		}
		up_write(&current->mm->mmap_sem);
	}

	return retval;
}
static unsigned long
emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
	      loff_t off)
{
	unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
	struct inode *inode;
	loff_t poff;

	end = start + len;
	pstart = PAGE_START(start);
	pend = PAGE_ALIGN(end);

	if (flags & MAP_FIXED) {
		ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
		if (start > pstart) {
			if (flags & MAP_SHARED)
				printk(KERN_INFO
				       "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
				       current->comm, task_pid_nr(current), start);
			ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
					   off);
			if (IS_ERR((void *) ret))
				return ret;
			pstart += PAGE_SIZE;
			if (pstart >= pend)
				goto out;	/* done */
		}
		if (end < pend) {
			if (flags & MAP_SHARED)
				printk(KERN_INFO
				       "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
				       current->comm, task_pid_nr(current), end);
			ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
					   (off + len) - offset_in_page(end));
			if (IS_ERR((void *) ret))
				return ret;
			pend -= PAGE_SIZE;
			if (pstart >= pend)
				goto out;	/* done */
		}
	} else {
		/*
		 * If a start address was specified, use it if the entire rounded out area
		 * is available.
		 */
		if (start && !pstart)
			fudge = 1;	/* handle case of mapping to range (0,PAGE_SIZE) */
		tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
		if (tmp != pstart) {
			pstart = tmp;
			start = pstart + offset_in_page(off);	/* make start congruent with off */
			end = start + len;
			pend = PAGE_ALIGN(end);
		}
	}

	poff = off + (pstart - start);	/* note: (pstart - start) may be negative */
	is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);

	if ((flags & MAP_SHARED) && !is_congruent)
		printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
		       "(addr=0x%lx,off=0x%llx)\n", current->comm, task_pid_nr(current), start, off);

	DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
	    is_congruent ? "congruent" : "not congruent", poff);

	down_write(&current->mm->mmap_sem);
	{
		if (!(flags & MAP_ANONYMOUS) && is_congruent)
			ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
		else
			ret = do_mmap(NULL, pstart, pend - pstart,
				      prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
				      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
	}
	up_write(&current->mm->mmap_sem);

	if (IS_ERR((void *) ret))
		return ret;

	if (!is_congruent) {
		/* read the file contents */
		inode = file->f_path.dentry->d_inode;
		if (!inode->i_fop || !file->f_op->read
		    || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
			< 0))
		{
			sys_munmap(pstart, pend - pstart);
			return -EINVAL;
		}
		if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
			return -EINVAL;
	}

	if (!(flags & MAP_FIXED))
		ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
out:
	return start;
}
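/*
 * Added note (not in the original file): a mapping is "congruent" when the
 * file offset and the user address land at the same offset within a native
 * page (e.g. both 0x1000 bytes into a 16KB page).  Only then can the file be
 * handed to do_mmap() directly; otherwise the range is mapped anonymously and
 * the file contents are read in by hand, which is why such incongruent
 * MAP_SHARED requests cannot really be shared.
 */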
#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */

static inline unsigned int
get_prot32 (unsigned int prot)
{
	if (prot & PROT_WRITE)
		/* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
		prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
	else if (prot & (PROT_READ | PROT_EXEC))
		/* on x86, there is no distinction between PROT_READ and PROT_EXEC */
		prot |= (PROT_READ | PROT_EXEC);
	return prot;
}
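/*
 * Added illustration (not in the original file): get_prot32(PROT_READ) yields
 * PROT_READ|PROT_EXEC, and get_prot32(PROT_WRITE) yields
 * PROT_READ|PROT_WRITE|PROT_EXEC, mirroring the fact that a classic ia32
 * page-table entry cannot express write-only or no-exec permissions.
 */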
unsigned long
ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
	      loff_t offset)
{
	DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
	    file, addr, len, prot, flags, offset);

	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	len = IA32_PAGE_ALIGN(len);
	if (len == 0)
		return addr;

	if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
	{
		if (flags & MAP_FIXED)
			return -ENOMEM;
		else
			return -EINVAL;
	}

	if (OFFSET4K(offset))
		return -EINVAL;

	prot = get_prot32(prot);

#if PAGE_SHIFT > IA32_PAGE_SHIFT
	mutex_lock(&ia32_mmap_mutex);
	{
		addr = emulate_mmap(file, addr, len, prot, flags, offset);
	}
	mutex_unlock(&ia32_mmap_mutex);
#else
	down_write(&current->mm->mmap_sem);
	{
		addr = do_mmap(file, addr, len, prot, flags, offset);
	}
	up_write(&current->mm->mmap_sem);
#endif
	DBG("ia32_do_mmap: returning 0x%lx\n", addr);
	return addr;
}

/*
 * Linux/i386 didn't use to be able to handle more than 4 system call parameters, so these
 * system calls used a memory block for parameter passing.
 */

struct mmap_arg_struct {
	unsigned int addr;
	unsigned int len;
	unsigned int prot;
	unsigned int flags;
	unsigned int fd;
	unsigned int offset;
};

asmlinkage long
sys32_mmap (struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	struct file *file = NULL;
	unsigned long addr;
	int flags;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if (OFFSET4K(a.offset))
		return -EINVAL;

	flags = a.flags;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(a.fd);
		if (!file)
			return -EBADF;
	}

	addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);

	if (file)
		fput(file);
	return addr;
}

asmlinkage long
sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
	     unsigned int fd, unsigned int pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			return -EBADF;
	}

	retval = ia32_do_mmap(file, addr, len, prot, flags,
			      (unsigned long) pgoff << IA32_PAGE_SHIFT);

	if (file)
		fput(file);
	return retval;
}
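/*
 * Added note (not in the original file): unlike old-style sys32_mmap(), which
 * takes a byte offset, sys32_mmap2() takes the offset in 4KB units, so e.g.
 * pgoff == 3 maps from file offset 3 << IA32_PAGE_SHIFT == 0x3000.
 */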
asmlinkage long
sys32_munmap (unsigned int start, unsigned int len)
{
	unsigned int end = start + len;
	long ret;

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
	ret = sys_munmap(start, end - start);
#else
	if (OFFSET4K(start))
		return -EINVAL;

	end = IA32_PAGE_ALIGN(end);
	if (start >= end)
		return -EINVAL;

	ret = ia32_unset_pp(&start, &end);
	if (ret < 0)
		return ret;

	if (start >= end)
		return 0;

	mutex_lock(&ia32_mmap_mutex);
	ret = sys_munmap(start, end - start);
	mutex_unlock(&ia32_mmap_mutex);
#endif
	return ret;
}
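/*
 * Added illustration (assumes 16KB native pages): an IA-32 munmap of
 * [0x1000, 0x3000) only clears two bits in the surrounding partial page;
 * ia32_unset_pp() then shrinks the range so that sys_munmap() is skipped, and
 * the whole native page is really unmapped only once every 4KB piece of it
 * has been released.
 */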
#if PAGE_SHIFT > IA32_PAGE_SHIFT
/*
 * When mprotect()ing a partial page, we set the permission to the union of the old
 * settings and the new settings.  In other words, it's only possible to make access to a
 * partial page less restrictive.
 */
static long
mprotect_subpage (unsigned long address, int new_prot)
{
	int old_prot;
	struct vm_area_struct *vma;

	if (new_prot == PROT_NONE)
		return 0;		/* optimize case where nothing changes... */
	vma = find_vma(current->mm, address);
	old_prot = get_page_prot(vma, address);
	return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
}
#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */

asmlinkage long
sys32_mprotect (unsigned int start, unsigned int len, int prot)
{
	unsigned int end = start + len;
#if PAGE_SHIFT > IA32_PAGE_SHIFT
	long retval = 0;
#endif

	prot = get_prot32(prot);

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
	return sys_mprotect(start, end - start, prot);
#else
	if (OFFSET4K(start))
		return -EINVAL;

	end = IA32_PAGE_ALIGN(end);
	if (end < start)
		return -EINVAL;

	retval = ia32_compare_pp(&start, &end);
	if (retval < 0)
		return retval;

	mutex_lock(&ia32_mmap_mutex);
	{
		if (offset_in_page(start)) {
			/* start address is 4KB aligned but not page aligned. */
			retval = mprotect_subpage(PAGE_START(start), prot);
			if (retval < 0)
				goto out;

			start = PAGE_ALIGN(start);
			if (start >= end)
				goto out;	/* retval is already zero... */
		}

		if (offset_in_page(end)) {
			/* end address is 4KB aligned but not page aligned. */
			retval = mprotect_subpage(PAGE_START(end), prot);
			if (retval < 0)
				goto out;

			end = PAGE_START(end);
		}
		retval = sys_mprotect(start, end - start, prot);
	}
 out:
	mutex_unlock(&ia32_mmap_mutex);
	return retval;
#endif
}

asmlinkage long
sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
	      unsigned int flags, unsigned int new_addr)
{
	long ret;

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
	ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
#else
	unsigned int old_end, new_end;

	if (OFFSET4K(addr))
		return -EINVAL;

	old_len = IA32_PAGE_ALIGN(old_len);
	new_len = IA32_PAGE_ALIGN(new_len);
	old_end = addr + old_len;
	new_end = addr + new_len;

	if (!new_len)
		return -EINVAL;

	if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr)))
		return -EINVAL;

	if (old_len >= new_len) {
		ret = sys32_munmap(addr + new_len, old_len - new_len);
		if (ret && old_len != new_len)
			return ret;
		ret = addr;
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			return ret;
		old_len = new_len;
	}

	addr = PAGE_START(addr);
	old_len = PAGE_ALIGN(old_end) - addr;
	new_len = PAGE_ALIGN(new_end) - addr;

	mutex_lock(&ia32_mmap_mutex);
	ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
	mutex_unlock(&ia32_mmap_mutex);

	if ((ret >= 0) && (old_len < new_len)) {
		/* mremap expanded successfully */
		ia32_set_pp(old_end, new_end, flags);
	}
#endif
	return ret;
}

asmlinkage long
sys32_pipe (int __user *fd)
{
	int retval;
	int fds[2];

	retval = do_pipe(fds);
	if (retval)
		goto out;
	if (copy_to_user(fd, fds, sizeof(fds)))
		retval = -EFAULT;
 out:
	return retval;
}

static inline long
get_tv32 (struct timeval *o, struct compat_timeval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec)));
}

static inline long
put_tv32 (struct compat_timeval __user *o, struct timeval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec)));
}
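/*
 * Added note (not in the original file): both helpers return nonzero on any
 * access check or copy failure, which the callers below translate into
 * -EFAULT.
 */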
asmlinkage unsigned long
sys32_alarm (unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

/*
 * Translations due to time_t size differences, which affect all sorts of
 * things, like timeval and itimerval.
 */

extern struct timezone sys_tz;

asmlinkage long
sys32_gettimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
{
	if (tv) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (put_tv32(tv, &ktv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}
	return 0;
}

asmlinkage long
sys32_settimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
{
	struct timeval ktv;
	struct timespec kts;
	struct timezone ktz;

	if (tv) {
		if (get_tv32(&ktv, tv))
			return -EFAULT;
		kts.tv_sec = ktv.tv_sec;
		kts.tv_nsec = ktv.tv_usec * 1000;
	}
	if (tz) {
		if (copy_from_user(&ktz, tz, sizeof(ktz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}

struct getdents32_callback {
	struct compat_dirent __user *current_dir;
	struct compat_dirent __user *previous;
	int count;
	int error;
};

struct readdir32_callback {
	struct old_linux32_dirent __user *dirent;
	int count;
};

static int
filldir32 (void *__buf, const char *name, int namlen, loff_t offset, u64 ino,
	   unsigned int d_type)
{
	struct compat_dirent __user *dirent;
	struct getdents32_callback *buf = (struct getdents32_callback *) __buf;
	int reclen = ROUND_UP(offsetof(struct compat_dirent, d_name) + namlen + 1, 4);
	u32 d_ino;

	buf->error = -EINVAL;	/* only used if we fail.. */
	if (reclen > buf->count)
		return -EINVAL;
	d_ino = ino;
	if (sizeof(d_ino) < sizeof(ino) && d_ino != ino)
		return -EOVERFLOW;
	buf->error = -EFAULT;	/* only used if we fail.. */
	dirent = buf->previous;
	if (dirent)
		if (put_user(offset, &dirent->d_off))
			return -EFAULT;
	dirent = buf->current_dir;
	buf->previous = dirent;
	if (put_user(d_ino, &dirent->d_ino)
	    || put_user(reclen, &dirent->d_reclen)
	    || copy_to_user(dirent->d_name, name, namlen)
	    || put_user(0, dirent->d_name + namlen))
		return -EFAULT;
	dirent = (struct compat_dirent __user *) ((char __user *) dirent + reclen);
	buf->current_dir = dirent;
	buf->count -= reclen;
	return 0;
}

asmlinkage long
sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned int count)
{
	struct file *file;
	struct compat_dirent __user *lastdirent;
	struct getdents32_callback buf;
	int error;

	error = -EFAULT;
	if (!access_ok(VERIFY_WRITE, dirent, count))
		goto out;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.current_dir = dirent;
	buf.previous = NULL;
	buf.count = count;
	buf.error = 0;

	error = vfs_readdir(file, filldir32, &buf);
	if (error < 0)
		goto out_putf;
	error = buf.error;
	lastdirent = buf.previous;
	if (lastdirent) {
		if (put_user(file->f_pos, &lastdirent->d_off))
			error = -EFAULT;
		else
			error = count - buf.count;
	}

out_putf:
	fput(file);
out:
	return error;
}
static int
fillonedir32 (void *__buf, const char *name, int namlen, loff_t offset, u64 ino,
	      unsigned int d_type)
{
	struct readdir32_callback *buf = (struct readdir32_callback *) __buf;
	struct old_linux32_dirent __user *dirent;
	u32 d_ino;

	if (buf->count)
		return -EINVAL;
	d_ino = ino;
	if (sizeof(d_ino) < sizeof(ino) && d_ino != ino)
		return -EOVERFLOW;
	buf->count++;
	dirent = buf->dirent;
	if (put_user(d_ino, &dirent->d_ino)
	    || put_user(offset, &dirent->d_offset)
	    || put_user(namlen, &dirent->d_namlen)
	    || copy_to_user(dirent->d_name, name, namlen)
	    || put_user(0, dirent->d_name + namlen))
		return -EFAULT;
	return 0;
}

asmlinkage long
sys32_readdir (unsigned int fd, void __user *dirent, unsigned int count)
{
	int error;
	struct file *file;
	struct readdir32_callback buf;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.count = 0;
	buf.dirent = dirent;

	error = vfs_readdir(file, fillonedir32, &buf);
	if (error >= 0)
		error = buf.count;
	fput(file);
out:
	return error;
}

struct sel_arg_struct {
	unsigned int n;
	unsigned int inp;
	unsigned int outp;
	unsigned int exp;
	unsigned int tvp;
};

asmlinkage long
sys32_old_select (struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
				 compat_ptr(a.exp), compat_ptr(a.tvp));
}
#define SEMOP		 1
#define SEMGET		 2
#define SEMCTL		 3
#define SEMTIMEDOP	 4
#define MSGSND		11
#define MSGRCV		12
#define MSGGET		13
#define MSGCTL		14
#define SHMAT		21
#define SHMDT		22
#define SHMGET		23
#define SHMCTL		24

asmlinkage long
sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
	int version;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMTIMEDOP:
		if (fifth)
			return compat_sys_semtimedop(first, compat_ptr(ptr),
				second, compat_ptr(fifth));
		/* else fall through for normal semop() */
	case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		return sys_semtimedop(first, compat_ptr(ptr), second,
				      NULL);
	case SEMGET:
		return sys_semget(first, second, third);
	case SEMCTL:
		return compat_sys_semctl(first, second, third, compat_ptr(ptr));

	case MSGSND:
		return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
	case MSGRCV:
		return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr));
	case MSGGET:
		return sys_msgget((key_t) first, second);
	case MSGCTL:
		return compat_sys_msgctl(first, second, compat_ptr(ptr));

	case SHMAT:
		return compat_sys_shmat(first, second, third, version, compat_ptr(ptr));
	case SHMDT:
		return sys_shmdt(compat_ptr(ptr));
	case SHMGET:
		return sys_shmget(first, (unsigned)second, third);
	case SHMCTL:
		return compat_sys_shmctl(first, second, compat_ptr(ptr));

	default:
		return -ENOSYS;
	}
	return -EINVAL;
}

asmlinkage long
compat_sys_wait4 (compat_pid_t pid, compat_uint_t *stat_addr, int options,
		  struct compat_rusage *ru);

asmlinkage long
sys32_waitpid (int pid, unsigned int *stat_addr, int options)
{
	return compat_sys_wait4(pid, stat_addr, options, NULL);
}
static unsigned int
ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
{
	size_t copied;
	unsigned int ret;

	copied = access_process_vm(child, addr, val, sizeof(*val), 0);
	return (copied != sizeof(ret)) ? -EIO : 0;
}

static unsigned int
ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
{
	if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * The order in which registers are stored in the ptrace regs structure
 */
#define PT_EBX		 0
#define PT_ECX		 1
#define PT_EDX		 2
#define PT_ESI		 3
#define PT_EDI		 4
#define PT_EBP		 5
#define PT_EAX		 6
#define PT_DS		 7
#define PT_ES		 8
#define PT_FS		 9
#define PT_GS		10
#define PT_ORIG_EAX	11
#define PT_EIP		12
#define PT_CS		13
#define PT_EFL		14
#define PT_UESP		15
#define PT_SS		16
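/*
 * Added note (not in the original file): these are the i386 user_regs_struct
 * slot indices, so PTRACE_PEEKUSR/POKEUSR callers pass a byte offset and
 * getreg()/putreg() below divide it by sizeof(int) before comparing against
 * these values.
 */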
  1275. static unsigned int
  1276. getreg (struct task_struct *child, int regno)
  1277. {
  1278. struct pt_regs *child_regs;
  1279. child_regs = task_pt_regs(child);
  1280. switch (regno / sizeof(int)) {
  1281. case PT_EBX: return child_regs->r11;
  1282. case PT_ECX: return child_regs->r9;
  1283. case PT_EDX: return child_regs->r10;
  1284. case PT_ESI: return child_regs->r14;
  1285. case PT_EDI: return child_regs->r15;
  1286. case PT_EBP: return child_regs->r13;
  1287. case PT_EAX: return child_regs->r8;
  1288. case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
  1289. case PT_EIP: return child_regs->cr_iip;
  1290. case PT_UESP: return child_regs->r12;
  1291. case PT_EFL: return child->thread.eflag;
  1292. case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
  1293. return __USER_DS;
  1294. case PT_CS: return __USER_CS;
  1295. default:
  1296. printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
  1297. break;
  1298. }
  1299. return 0;
  1300. }
  1301. static void
  1302. putreg (struct task_struct *child, int regno, unsigned int value)
  1303. {
  1304. struct pt_regs *child_regs;
  1305. child_regs = task_pt_regs(child);
  1306. switch (regno / sizeof(int)) {
  1307. case PT_EBX: child_regs->r11 = value; break;
  1308. case PT_ECX: child_regs->r9 = value; break;
  1309. case PT_EDX: child_regs->r10 = value; break;
  1310. case PT_ESI: child_regs->r14 = value; break;
  1311. case PT_EDI: child_regs->r15 = value; break;
  1312. case PT_EBP: child_regs->r13 = value; break;
  1313. case PT_EAX: child_regs->r8 = value; break;
  1314. case PT_ORIG_EAX: child_regs->r1 = value; break;
  1315. case PT_EIP: child_regs->cr_iip = value; break;
  1316. case PT_UESP: child_regs->r12 = value; break;
  1317. case PT_EFL: child->thread.eflag = value; break;
  1318. case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
  1319. if (value != __USER_DS)
  1320. printk(KERN_ERR
  1321. "ia32.putreg: attempt to set invalid segment register %d = %x\n",
  1322. regno, value);
  1323. break;
  1324. case PT_CS:
  1325. if (value != __USER_CS)
  1326. printk(KERN_ERR
  1327. "ia32.putreg: attempt to to set invalid segment register %d = %x\n",
  1328. regno, value);
  1329. break;
  1330. default:
  1331. printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
  1332. break;
  1333. }
  1334. }
  1335. static void
  1336. put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
  1337. struct switch_stack *swp, int tos)
  1338. {
  1339. struct _fpreg_ia32 *f;
  1340. char buf[32];
  1341. f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
  1342. if ((regno += tos) >= 8)
  1343. regno -= 8;
  1344. switch (regno) {
  1345. case 0:
  1346. ia64f2ia32f(f, &ptp->f8);
  1347. break;
  1348. case 1:
  1349. ia64f2ia32f(f, &ptp->f9);
  1350. break;
  1351. case 2:
  1352. ia64f2ia32f(f, &ptp->f10);
  1353. break;
  1354. case 3:
  1355. ia64f2ia32f(f, &ptp->f11);
  1356. break;
  1357. case 4:
  1358. case 5:
  1359. case 6:
  1360. case 7:
  1361. ia64f2ia32f(f, &swp->f12 + (regno - 4));
  1362. break;
  1363. }
  1364. copy_to_user(reg, f, sizeof(*reg));
  1365. }
  1366. static void
  1367. get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
  1368. struct switch_stack *swp, int tos)
  1369. {
  1370. if ((regno += tos) >= 8)
  1371. regno -= 8;
  1372. switch (regno) {
  1373. case 0:
  1374. copy_from_user(&ptp->f8, reg, sizeof(*reg));
  1375. break;
  1376. case 1:
  1377. copy_from_user(&ptp->f9, reg, sizeof(*reg));
  1378. break;
  1379. case 2:
  1380. copy_from_user(&ptp->f10, reg, sizeof(*reg));
  1381. break;
  1382. case 3:
  1383. copy_from_user(&ptp->f11, reg, sizeof(*reg));
  1384. break;
  1385. case 4:
  1386. case 5:
  1387. case 6:
  1388. case 7:
  1389. copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
  1390. break;
  1391. }
  1392. return;
  1393. }

int
save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;

	if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
		return -EFAULT;

	__put_user(tsk->thread.fcr & 0xffff, &save->cwd);
	__put_user(tsk->thread.fsr & 0xffff, &save->swd);
	__put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
	__put_user(tsk->thread.fir, &save->fip);
	__put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
	__put_user(tsk->thread.fdr, &save->foo);
	__put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

	/*
	 * Stack frames start with 16 bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = task_pt_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;
	for (i = 0; i < 8; i++)
		put_fpreg(i, &save->st_space[i], ptp, swp, tos);
	return 0;
}
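
/*
 * Reader's note on the bit fiddling above (inferred from the masks used here,
 * not part of the original source): thread.fsr mirrors the IA-32 FPU status
 * word in its low 16 bits -- bits 11-13 of which are the x87 top-of-stack --
 * and carries the tag word in bits 16-31, hence twd == (fsr >> 16) & 0xffff.
 */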

static int
restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;
	unsigned int fsrlo, fsrhi, num32;

	if (!access_ok(VERIFY_READ, save, sizeof(*save)))
		return -EFAULT;

	__get_user(num32, (unsigned int __user *)&save->cwd);
	tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
	__get_user(fsrlo, (unsigned int __user *)&save->swd);
	__get_user(fsrhi, (unsigned int __user *)&save->twd);
	num32 = (fsrhi << 16) | fsrlo;
	tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->fip);
	tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->foo);
	tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

	/*
	 * Stack frames start with 16 bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = task_pt_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;
	for (i = 0; i < 8; i++)
		get_fpreg(i, &save->st_space[i], ptp, swp, tos);
	return 0;
}

int
save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;
	unsigned long mxcsr = 0;
	unsigned long num128[2];

	if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
		return -EFAULT;

	__put_user(tsk->thread.fcr & 0xffff, &save->cwd);
	__put_user(tsk->thread.fsr & 0xffff, &save->swd);
	__put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
	__put_user(tsk->thread.fir, &save->fip);
	__put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
	__put_user(tsk->thread.fdr, &save->foo);
	__put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

	/*
	 * Stack frames start with 16 bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = task_pt_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;
	for (i = 0; i < 8; i++)
		put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

	mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
	__put_user(mxcsr & 0xffff, &save->mxcsr);
	for (i = 0; i < 8; i++) {
		memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
		memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
		copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
	}
	return 0;
}
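
/*
 * Example of the mxcsr assembly above: an mxcsr of 0x1f80 (all SSE exceptions
 * masked, no flags set) is reconstructed entirely from bits 39-47 of
 * thread.fcr (the 0xff80 mask after the shift); the six sticky exception
 * flags (mask 0x3f) come from bits 32-37 of thread.fsr.
 */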

static int
restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
	struct switch_stack *swp;
	struct pt_regs *ptp;
	int i, tos;
	unsigned int fsrlo, fsrhi, num32;
	int mxcsr;
	unsigned long num64;
	unsigned long num128[2];

	if (!access_ok(VERIFY_READ, save, sizeof(*save)))
		return -EFAULT;

	__get_user(num32, (unsigned int __user *)&save->cwd);
	tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
	__get_user(fsrlo, (unsigned int __user *)&save->swd);
	__get_user(fsrhi, (unsigned int __user *)&save->twd);
	num32 = (fsrhi << 16) | fsrlo;
	tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->fip);
	tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
	__get_user(num32, (unsigned int __user *)&save->foo);
	tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

	/*
	 * Stack frames start with 16 bytes of temp space
	 */
	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
	ptp = task_pt_regs(tsk);
	tos = (tsk->thread.fsr >> 11) & 7;
	for (i = 0; i < 8; i++)
		get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

	__get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
	num64 = mxcsr & 0xff10;
	tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32);
	num64 = mxcsr & 0x3f;
	tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32);
	for (i = 0; i < 8; i++) {
		copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
		memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
		memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
	}
	return 0;
}

asmlinkage long
sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
{
	struct task_struct *child;
	unsigned int value, tmp;
	long i, ret;

	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = sys_ptrace(request, pid, addr, data);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:	/* read word at location addr */
		ret = ia32_peek(child, addr, &value);
		if (ret == 0)
			ret = put_user(value, (unsigned int __user *) compat_ptr(data));
		else
			ret = -EIO;
		goto out_tsk;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:	/* write the word at location addr */
		ret = ia32_poke(child, addr, data);
		goto out_tsk;

	case PTRACE_PEEKUSR:	/* read word at addr in USER area */
		ret = -EIO;
		if ((addr & 3) || addr > 17*sizeof(int))
			break;
		tmp = getreg(child, addr);
		if (!put_user(tmp, (unsigned int __user *) compat_ptr(data)))
			ret = 0;
		break;

	case PTRACE_POKEUSR:	/* write word at addr in USER area */
		ret = -EIO;
		if ((addr & 3) || addr > 17*sizeof(int))
			break;
		putreg(child, addr, data);
		ret = 0;
		break;

	case IA32_PTRACE_GETREGS:
		if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) {
			ret = -EIO;
			break;
		}
		for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
			put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data));
			data += sizeof(int);
		}
		ret = 0;
		break;

	case IA32_PTRACE_SETREGS:
		if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) {
			ret = -EIO;
			break;
		}
		for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
			get_user(tmp, (unsigned int __user *) compat_ptr(data));
			putreg(child, i, tmp);
			data += sizeof(int);
		}
		ret = 0;
		break;

	case IA32_PTRACE_GETFPREGS:
		ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
					compat_ptr(data));
		break;

	case IA32_PTRACE_GETFPXREGS:
		ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
					 compat_ptr(data));
		break;

	case IA32_PTRACE_SETFPREGS:
		ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
					   compat_ptr(data));
		break;

	case IA32_PTRACE_SETFPXREGS:
		ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
					    compat_ptr(data));
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data));
		break;

	case PTRACE_SYSCALL:	/* continue, stop after next syscall */
	case PTRACE_CONT:	/* restart after signal. */
	case PTRACE_KILL:
	case PTRACE_SINGLESTEP:	/* execute child for one instruction */
	case PTRACE_DETACH:	/* detach a process */
		ret = sys_ptrace(request, pid, addr, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
  out_tsk:
	put_task_struct(child);
  out:
	unlock_kernel();
	return ret;
}
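
/*
 * Note on the 17*sizeof(int) bound used for PEEKUSR/POKEUSR and *REGS above:
 * the IA-32 user area exposes 17 register slots (PT_EBX == 0 through
 * PT_SS == 16), so only word-aligned byte offsets in that window name real
 * registers; getreg()/putreg() divide by sizeof(int) to recover the slot index.
 */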

typedef struct {
	unsigned int	ss_sp;
	unsigned int	ss_flags;
	unsigned int	ss_size;
} ia32_stack_t;

asmlinkage long
sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
		   long arg2, long arg3, long arg4, long arg5, long arg6,
		   long arg7, struct pt_regs pt)
{
	stack_t uss, uoss;
	ia32_stack_t buf32;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (uss32) {
		if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
			return -EFAULT;
		uss.ss_sp = (void __user *) (long) buf32.ss_sp;
		uss.ss_flags = buf32.ss_flags;
		/* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the
		   check and set it to the user-requested value later */
		if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
			ret = -ENOMEM;
			goto out;
		}
		uss.ss_size = MINSIGSTKSZ;
	}
	set_fs(KERNEL_DS);
	ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
			     (stack_t __user *) &uoss, pt.r12);
	/* buf32 is valid only if a new stack was actually passed in: */
	if (uss32)
		current->sas_ss_size = buf32.ss_size;
	set_fs(old_fs);
  out:
	if (ret < 0)
		return ret;
	if (uoss32) {
		buf32.ss_sp = (long __user) uoss.ss_sp;
		buf32.ss_flags = uoss.ss_flags;
		buf32.ss_size = uoss.ss_size;
		if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
			return -EFAULT;
	}
	return ret;
}

asmlinkage int
sys32_pause (void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

asmlinkage int
sys32_msync (unsigned int start, unsigned int len, int flags)
{
	unsigned int addr;

	if (OFFSET4K(start))
		return -EINVAL;
	addr = PAGE_START(start);
	return sys_msync(addr, len + (start - addr), flags);
}
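
/*
 * Realignment example (assuming 4KB pages): sys32_msync(0x40001234, 0x100, f)
 * becomes sys_msync(0x40001000, 0x334, f) -- the start is rounded down to a
 * page boundary and the length grows by the same 0x234 bytes.
 */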

struct sysctl32 {
	unsigned int	name;
	int		nlen;
	unsigned int	oldval;
	unsigned int	oldlenp;
	unsigned int	newval;
	unsigned int	newlen;
	unsigned int	__unused[4];
};

#ifdef CONFIG_SYSCTL_SYSCALL
asmlinkage long
sys32_sysctl (struct sysctl32 __user *args)
{
	struct sysctl32 a32;
	mm_segment_t old_fs = get_fs();
	void __user *oldvalp, *newvalp;
	size_t oldlen;
	int __user *namep;
	long ret;

	if (copy_from_user(&a32, args, sizeof(a32)))
		return -EFAULT;

	/*
	 * We need to pre-validate these: address checking is disabled around the
	 * do_sysctl() call below because of OLDLEN, so we can't run the risk of the
	 * user specifying bad addresses here.  Well, since we're dealing with 32 bit
	 * addresses, we KNOW that access_ok() will always succeed, so this is an
	 * expensive NOP, but so what...
	 */
	namep = (int __user *) compat_ptr(a32.name);
	oldvalp = compat_ptr(a32.oldval);
	newvalp = compat_ptr(a32.newval);

	if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
	    || !access_ok(VERIFY_WRITE, namep, 0)
	    || !access_ok(VERIFY_WRITE, oldvalp, 0)
	    || !access_ok(VERIFY_WRITE, newvalp, 0))
		return -EFAULT;

	set_fs(KERNEL_DS);
	lock_kernel();
	ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen,
			newvalp, (size_t) a32.newlen);
	unlock_kernel();
	set_fs(old_fs);

	if (oldvalp && put_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
		return -EFAULT;

	return ret;
}
#endif

asmlinkage long
sys32_newuname (struct new_utsname __user *name)
{
	int ret = sys_newuname(name);

	if (!ret)
		if (copy_to_user(name->machine, "i686\0\0\0", 8))
			ret = -EFAULT;
	return ret;
}

asmlinkage long
sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid)
{
	uid_t a, b, c;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c);
	set_fs(old_fs);

	if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
		return -EFAULT;
	return ret;
}

asmlinkage long
sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
{
	gid_t a, b, c;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c);
	set_fs(old_fs);

	if (ret)
		return ret;

	return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
}

asmlinkage long
sys32_lseek (unsigned int fd, int offset, unsigned int whence)
{
	/* Sign-extension of "offset" is important here... */
	return sys_lseek(fd, offset, whence);
}
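
/*
 * Why the "int offset" matters, by example: lseek(fd, -4, SEEK_END) from a
 * 32-bit process arrives here as -4 and is sign-extended to the 64-bit -4
 * that sys_lseek() expects; an unsigned parameter would instead be passed
 * through as 0xfffffffc, seeking roughly 4GB past the end.
 */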

static int
groups16_to_user(short __user *grouplist, struct group_info *group_info)
{
	int i;
	short group;

	for (i = 0; i < group_info->ngroups; i++) {
		group = (short)GROUP_AT(group_info, i);
		if (put_user(group, grouplist+i))
			return -EFAULT;
	}
	return 0;
}

static int
groups16_from_user(struct group_info *group_info, short __user *grouplist)
{
	int i;
	short group;

	for (i = 0; i < group_info->ngroups; i++) {
		if (get_user(group, grouplist+i))
			return -EFAULT;
		GROUP_AT(group_info, i) = (gid_t)group;
	}
	return 0;
}

asmlinkage long
sys32_getgroups16 (int gidsetsize, short __user *grouplist)
{
	int i;

	if (gidsetsize < 0)
		return -EINVAL;

	get_group_info(current->group_info);
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups16_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
  out:
	put_group_info(current->group_info);
	return i;
}

asmlinkage long
sys32_setgroups16 (int gidsetsize, short __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups16_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}

asmlinkage long
sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
{
	return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo);
}

asmlinkage long
sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
{
	return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
}
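
/*
 * Hi/lo splice example: truncating to 6GB arrives as len_lo == 0x80000000,
 * len_hi == 0x1, and ((unsigned long) 0x1 << 32) | 0x80000000 reassembles
 * the native 64-bit length 0x180000000.
 */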

static int
putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf)
{
	int err;
	u64 hdev;

	if (clear_user(ubuf, sizeof(*ubuf)))
		return -EFAULT;

	hdev = huge_encode_dev(kbuf->dev);
	err  = __put_user(hdev, (u32 __user*)&ubuf->st_dev);
	err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1);
	err |= __put_user(kbuf->ino, &ubuf->__st_ino);
	err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
	err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
	err |= __put_user(kbuf->mode, &ubuf->st_mode);
	err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
	err |= __put_user(kbuf->uid, &ubuf->st_uid);
	err |= __put_user(kbuf->gid, &ubuf->st_gid);
	hdev = huge_encode_dev(kbuf->rdev);
	err |= __put_user(hdev, (u32 __user*)&ubuf->st_rdev);	/* OR, not =: don't drop earlier errors */
	err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1);
	err |= __put_user(kbuf->size, &ubuf->st_size_lo);
	err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
	err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
	err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
	err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
	err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
	err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
	err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
	err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
	err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
	return err;
}

asmlinkage long
sys32_stat64 (char __user *filename, struct stat64 __user *statbuf)
{
	struct kstat s;
	long ret = vfs_stat(filename, &s);

	if (!ret)
		ret = putstat64(statbuf, &s);
	return ret;
}

asmlinkage long
sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf)
{
	struct kstat s;
	long ret = vfs_lstat(filename, &s);

	if (!ret)
		ret = putstat64(statbuf, &s);
	return ret;
}

asmlinkage long
sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf)
{
	struct kstat s;
	long ret = vfs_fstat(fd, &s);

	if (!ret)
		ret = putstat64(statbuf, &s);
	return ret;
}

asmlinkage long
sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval)
{
	mm_segment_t old_fs = get_fs();
	struct timespec t;
	long ret;

	set_fs(KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
	set_fs(old_fs);
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}

asmlinkage long
sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
	return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}

asmlinkage long
sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
	return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}

asmlinkage long
sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count)
{
	mm_segment_t old_fs = get_fs();
	long ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count);
	set_fs(old_fs);

	if (offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}

asmlinkage long
sys32_personality (unsigned int personality)
{
	long ret;

	if (current->personality == PER_LINUX32 && personality == PER_LINUX)
		personality = PER_LINUX32;
	ret = sys_personality(personality);
	if (ret == PER_LINUX32)
		ret = PER_LINUX;
	return ret;
}
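
/*
 * The swap above keeps the emulation invisible to 32-bit tasks: a request for
 * PER_LINUX from an already-PER_LINUX32 task stays PER_LINUX32 internally,
 * and an old value of PER_LINUX32 is reported back as plain PER_LINUX.
 */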

asmlinkage unsigned long
sys32_brk (unsigned int brk)
{
	unsigned long ret, obrk;
	struct mm_struct *mm = current->mm;

	obrk = mm->brk;
	ret = sys_brk(brk);
	/* if the break was lowered, zero the now-unused tail of the last page: */
	if (ret < obrk)
		clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret);
	return ret;
}

/* Structure for ia32 emulation on ia64 */
struct epoll_event32
{
	u32 events;
	u32 data[2];
};

asmlinkage long
sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event)
{
	mm_segment_t old_fs = get_fs();
	struct epoll_event event64;
	int error;
	u32 data_halfword;

	if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32)))
		return -EFAULT;

	__get_user(event64.events, &event->events);
	__get_user(data_halfword, &event->data[0]);
	event64.data = data_halfword;
	__get_user(data_halfword, &event->data[1]);
	event64.data |= (u64)data_halfword << 32;

	set_fs(KERNEL_DS);
	error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64);
	set_fs(old_fs);

	return error;
}
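
/*
 * Layout note (inferred from the copying above): in the 32-bit ABI the 64-bit
 * epoll data field is not 8-byte aligned, so epoll_event32 models it as
 * data[2]; the two u32 halves are spliced into a native u64 before the real
 * sys_epoll_ctl() is invoked under KERNEL_DS.
 */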

asmlinkage long
sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents,
		 int timeout)
{
	struct epoll_event *events64 = NULL;
	mm_segment_t old_fs = get_fs();
	int numevents, size;
	int evt_idx;
	int do_free_pages = 0;

	if (maxevents <= 0) {
		return -EINVAL;
	}

	/* Verify that the area passed by the user is writable */
	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32)))
		return -EFAULT;

	/*
	 * Allocate space for the intermediate copy.  If the space needed
	 * is large enough to cause kmalloc to fail, then try again with
	 * __get_free_pages.
	 */
	size = maxevents * sizeof(struct epoll_event);
	events64 = kmalloc(size, GFP_KERNEL);
	if (events64 == NULL) {
		events64 = (struct epoll_event *)
			__get_free_pages(GFP_KERNEL, get_order(size));
		if (events64 == NULL)
			return -ENOMEM;
		do_free_pages = 1;
	}

	/* Do the system call */
	set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem */
	numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64,
				   maxevents, timeout);
	set_fs(old_fs);

	/* Don't modify userspace memory if we're returning an error */
	if (numevents > 0) {
		/* Translate the 64-bit structures back into the 32-bit
		   structures */
		for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
			__put_user(events64[evt_idx].events,
				   &events[evt_idx].events);
			__put_user((u32)events64[evt_idx].data,
				   &events[evt_idx].data[0]);
			__put_user((u32)(events64[evt_idx].data >> 32),
				   &events[evt_idx].data[1]);
		}
	}

	if (do_free_pages)
		free_pages((unsigned long) events64, get_order(size));
	else
		kfree(events64);
	return numevents;
}

/*
 * Get a yet unused TLS descriptor index.
 */
static int
get_free_idx (void)
{
	struct thread_struct *t = &current->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (desc_empty(t->tls_array + idx))
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int
sys32_set_thread_area (struct ia32_user_desc __user *u_info)
{
	struct thread_struct *t = &current->thread;
	struct ia32_user_desc info;
	struct desc_struct *desc;
	int cpu, idx;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;
	idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and allocate an empty descriptor:
	 */
	if (idx == -1) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

	cpu = smp_processor_id();

	if (LDT_empty(&info)) {
		desc->a = 0;
		desc->b = 0;
	} else {
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}
	load_TLS(t, cpu);
	return 0;
}

/*
 * Get the current Thread-Local Storage area:
 */

#define GET_BASE(desc) (			\
	(((desc)->a >> 16) & 0x0000ffff) |	\
	(((desc)->b << 16) & 0x00ff0000) |	\
	( (desc)->b        & 0xff000000)   )

#define GET_LIMIT(desc) (		\
	((desc)->a & 0x0ffff) |		\
	((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
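
/*
 * Descriptor decode, by example: a segment with base 0x12345678 stores base
 * bits 15-0 in the high half of word "a", and bits 23-16 / 31-24 in bits 7-0
 * and 31-24 of word "b", so GET_BASE() reassembles
 * 0x5678 | 0x340000 | 0x12000000 == 0x12345678.
 */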

asmlinkage int
sys32_get_thread_area (struct ia32_user_desc __user *u_info)
{
	struct ia32_user_desc info;
	struct desc_struct *desc;
	int idx;

	if (get_user(idx, &u_info->entry_number))
		return -EFAULT;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

	info.entry_number = idx;
	info.base_addr = GET_BASE(desc);
	info.limit = GET_LIMIT(desc);
	info.seg_32bit = GET_32BIT(desc);
	info.contents = GET_CONTENTS(desc);
	info.read_exec_only = !GET_WRITABLE(desc);
	info.limit_in_pages = GET_LIMIT_PAGES(desc);
	info.seg_not_present = !GET_PRESENT(desc);
	info.useable = GET_USEABLE(desc);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
			__u32 len_low, __u32 len_high, int advice)
{
	return sys_fadvise64_64(fd,
				(((u64)offset_high)<<32) | offset_low,
				(((u64)len_high)<<32) | len_low,
				advice);
}
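
/*
 * The untested wrappers below exist to map the compat "leave unchanged"
 * sentinel onto the native one: (compat_uid_t)-1 must become exactly
 * (uid_t)-1 after the type conversion, so each ID is checked against -1
 * explicitly instead of being cast blindly.
 */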

#ifdef NOTYET  /* UNTESTED FOR IA64 FROM HERE DOWN */

asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
{
	uid_t sruid, seuid;

	sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
	seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
	return sys_setreuid(sruid, seuid);
}

asmlinkage long
sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
		compat_uid_t suid)
{
	uid_t sruid, seuid, ssuid;

	sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
	seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
	ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
	return sys_setresuid(sruid, seuid, ssuid);
}

asmlinkage long
sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
{
	gid_t srgid, segid;

	srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
	segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
	return sys_setregid(srgid, segid);
}

asmlinkage long
sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
		compat_gid_t sgid)
{
	gid_t srgid, segid, ssgid;

	srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
	segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
	ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
	return sys_setresgid(srgid, segid, ssgid);
}
#endif /* NOTYET */