irixelf.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * irixelf.c: Code to load IRIX ELF executables conforming to the MIPS ABI.
 * Based off of work by Eric Youngdale.
 *
 * Copyright (C) 1993 - 1994 Eric Youngdale <ericy@cais.com>
 * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
 * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com>
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>

#include <asm/mipsregs.h>
#include <asm/namei.h>
#include <asm/prctl.h>
#include <asm/uaccess.h>
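
/*
 * DLINFO_ITEMS is the number of entries in the ELF auxiliary vector laid
 * down by create_irix_tables() below: eleven AT_* entries plus the
 * terminating AT_NULL entry.
 */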
#define DLINFO_ITEMS 12

#include <linux/elf.h>

static int load_irix_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_irix_library(struct file *);
static int irix_core_dump(long signr, struct pt_regs *regs,
			  struct file *file, unsigned long limit);

static struct linux_binfmt irix_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_irix_binary,
	.load_shlib	= load_irix_library,
	.core_dump	= irix_core_dump,
	.min_coredump	= PAGE_SIZE,
};

/* Debugging routines. */
static char *get_elf_p_type(Elf32_Word p_type)
{
#ifdef DEBUG
	switch (p_type) {
	case PT_NULL:
		return "PT_NULL";
	case PT_LOAD:
		return "PT_LOAD";
	case PT_DYNAMIC:
		return "PT_DYNAMIC";
	case PT_INTERP:
		return "PT_INTERP";
	case PT_NOTE:
		return "PT_NOTE";
	case PT_SHLIB:
		return "PT_SHLIB";
	case PT_PHDR:
		return "PT_PHDR";
	case PT_LOPROC:
		return "PT_LOPROC/REGINFO";
	case PT_HIPROC:
		return "PT_HIPROC";
	default:
		return "PT_BOGUS";
	}
#endif
	/* Keep a return on the !DEBUG path so the function always
	 * returns a value. */
	return "";
}

static void print_elfhdr(struct elfhdr *ehp)
{
	int i;

	pr_debug("ELFHDR: e_ident<");
	for (i = 0; i < (EI_NIDENT - 1); i++)
		pr_debug("%x ", ehp->e_ident[i]);
	pr_debug("%x>\n", ehp->e_ident[i]);
	pr_debug(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
		 (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine,
		 (unsigned long) ehp->e_version);
	pr_debug(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
		 "e_flags[%08lx]\n",
		 (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff,
		 (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags);
	pr_debug(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
		 (unsigned short) ehp->e_ehsize,
		 (unsigned short) ehp->e_phentsize,
		 (unsigned short) ehp->e_phnum);
	pr_debug(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
		 (unsigned short) ehp->e_shentsize,
		 (unsigned short) ehp->e_shnum,
		 (unsigned short) ehp->e_shstrndx);
}

static void print_phdr(int i, struct elf_phdr *ep)
{
	pr_debug("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
		 "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type),
		 (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr,
		 (unsigned long) ep->p_paddr);
	pr_debug(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
		 "p_align[%08lx]\n", (unsigned long) ep->p_filesz,
		 (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags,
		 (unsigned long) ep->p_align);
}

static void dump_phdrs(struct elf_phdr *ep, int pnum)
{
	int i;

	for (i = 0; i < pnum; i++, ep++) {
		if ((ep->p_type == PT_LOAD) ||
		    (ep->p_type == PT_INTERP) ||
		    (ep->p_type == PT_PHDR))
			print_phdr(i, ep);
	}
}

static void set_brk(unsigned long start, unsigned long end)
{
	start = PAGE_ALIGN(start);
	end = PAGE_ALIGN(end);
	if (end <= start)
		return;
	down_write(&current->mm->mmap_sem);
	do_brk(start, end - start);
	up_write(&current->mm->mmap_sem);
}

/* We need to explicitly zero any fractional pages
 * after the data section (i.e. bss).  These pages would otherwise
 * contain junk from the file that should not be in memory.
 */
static void padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = elf_bss & (PAGE_SIZE-1);
	if (nbyte) {
		nbyte = PAGE_SIZE - nbyte;
		clear_user((void __user *) elf_bss, nbyte);
	}
}
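
/*
 * Layout built on the user stack by create_irix_tables(), from higher
 * to lower addresses: the argument/environment strings at p, then the
 * auxiliary vector (id/value pairs terminated by AT_NULL), the envp
 * pointer array with its NULL terminator, the argv pointer array with
 * its NULL terminator, and finally argc.  The function returns sp
 * pointing at argc.
 */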
static unsigned long *create_irix_tables(char *p, int argc, int envc,
	struct elfhdr *exec, unsigned int load_addr,
	unsigned int interp_load_addr, struct pt_regs *regs,
	struct elf_phdr *ephdr)
{
	elf_addr_t *argv;
	elf_addr_t *envp;
	elf_addr_t *sp, *csp;

	pr_debug("create_irix_tables: p[%p] argc[%d] envc[%d] "
		 "load_addr[%08x] interp_load_addr[%08x]\n",
		 p, argc, envc, load_addr, interp_load_addr);

	sp = (elf_addr_t *) (~15UL & (unsigned long) p);
	csp = sp;
	csp -= exec ? DLINFO_ITEMS*2 : 2;
	csp -= envc+1;
	csp -= argc+1;
	csp -= 1;	/* argc itself */
	if ((unsigned long)csp & 15UL) {
		sp -= (16UL - ((unsigned long)csp & 15UL)) / sizeof(*sp);
	}

	/*
	 * Put the ELF interpreter info on the stack
	 */
#define NEW_AUX_ENT(nr, id, val) \
	__put_user((id), sp+(nr*2)); \
	__put_user((val), sp+(nr*2+1)); \
	sp -= 2;

	NEW_AUX_ENT(0, AT_NULL, 0);
	if (exec) {
		sp -= 11*2;

		NEW_AUX_ENT(0, AT_PHDR, load_addr + exec->e_phoff);
		NEW_AUX_ENT(1, AT_PHENT, sizeof(struct elf_phdr));
		NEW_AUX_ENT(2, AT_PHNUM, exec->e_phnum);
		NEW_AUX_ENT(3, AT_PAGESZ, ELF_EXEC_PAGESIZE);
		NEW_AUX_ENT(4, AT_BASE, interp_load_addr);
		NEW_AUX_ENT(5, AT_FLAGS, 0);
		NEW_AUX_ENT(6, AT_ENTRY, (elf_addr_t) exec->e_entry);
		NEW_AUX_ENT(7, AT_UID, (elf_addr_t) current->uid);
		NEW_AUX_ENT(8, AT_EUID, (elf_addr_t) current->euid);
		NEW_AUX_ENT(9, AT_GID, (elf_addr_t) current->gid);
		NEW_AUX_ENT(10, AT_EGID, (elf_addr_t) current->egid);
	}
#undef NEW_AUX_ENT

	sp -= envc+1;
	envp = sp;
	sp -= argc+1;
	argv = sp;
	__put_user((elf_addr_t)argc, --sp);
	current->mm->arg_start = (unsigned long) p;
	while (argc-- > 0) {
		__put_user((unsigned long)p, argv++);
		p += strlen_user(p);
	}
	__put_user((unsigned long) NULL, argv);
	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
	while (envc-- > 0) {
		__put_user((unsigned long)p, envp++);
		p += strlen_user(p);
	}
	__put_user((unsigned long) NULL, envp);
	current->mm->env_end = (unsigned long) p;

	return sp;
}

/* This is much more generalized than the library routine read function,
 * so we keep this separate.  Technically the library read function
 * is only provided so that we can read a.out libraries that have
 * an ELF header.
 */
static unsigned int load_irix_interp(struct elfhdr *interp_elf_ex,
				     struct file *interpreter,
				     unsigned int *interp_load_addr)
{
	struct elf_phdr *elf_phdata = NULL;
	struct elf_phdr *eppnt;
	unsigned int len;
	unsigned int load_addr;
	int elf_bss;
	int retval;
	unsigned int last_bss;
	int error;
	int i;
	unsigned int k;

	elf_bss = 0;
	last_bss = 0;
	error = load_addr = 0;

	print_elfhdr(interp_elf_ex);

	/* First of all, some simple consistency checks */
	if ((interp_elf_ex->e_type != ET_EXEC &&
	     interp_elf_ex->e_type != ET_DYN) ||
	     !interpreter->f_op->mmap) {
		printk("IRIX interp has bad e_type %d\n", interp_elf_ex->e_type);
		return 0xffffffff;
	}

	/* Now read in all of the header information */
	if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
		printk("IRIX interp header bigger than a page (%d)\n",
		       (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum));
		return 0xffffffff;
	}

	elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
			     GFP_KERNEL);
	if (!elf_phdata) {
		printk("Cannot kmalloc phdata for IRIX interp.\n");
		return 0xffffffff;
	}

	/* If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != 32) {
		printk("IRIX interp e_phentsize == %d != 32 ",
		       interp_elf_ex->e_phentsize);
		kfree(elf_phdata);
		return 0xffffffff;
	}

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *) elf_phdata,
			     sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

	dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			elf_type |= MAP_FIXED;
			vaddr = eppnt->p_vaddr;

			pr_debug("INTERP do_mmap"
				 "(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
				 interpreter, vaddr,
				 (unsigned long)
				 (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
				 (unsigned long)
				 elf_prot, (unsigned long) elf_type,
				 (unsigned long)
				 (eppnt->p_offset & 0xfffff000));

			down_write(&current->mm->mmap_sem);
			error = do_mmap(interpreter, vaddr,
					eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
					elf_prot, elf_type,
					eppnt->p_offset & 0xfffff000);
			up_write(&current->mm->mmap_sem);

			if (error < 0 && error > -1024) {
				printk("Aieee IRIX interp mmap error=%d\n",
				       error);
				break;	/* Real error */
			}
			pr_debug("error=%08lx ", (unsigned long) error);
			if (!load_addr && interp_elf_ex->e_type == ET_DYN) {
				load_addr = error;
				pr_debug("load_addr = error ");
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/* Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
			pr_debug("\n");
		}
	}

	/* Now use mmap to map the library into memory. */
	if (error < 0 && error > -1024) {
		pr_debug("got error %d\n", error);
		kfree(elf_phdata);
		return 0xffffffff;
	}

	/* Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	pr_debug("padzero(%08lx) ", (unsigned long) (elf_bss));
	padzero(elf_bss);
	len = (elf_bss + 0xfff) & 0xfffff000;	/* What we have mapped so far */

	pr_debug("last_bss[%08lx] len[%08lx]\n", (unsigned long) last_bss,
		 (unsigned long) len);

	/* Map the last of the bss segment */
	if (last_bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, (last_bss - len));
		up_write(&current->mm->mmap_sem);
	}
	kfree(elf_phdata);

	*interp_load_addr = load_addr;
	return ((unsigned int) interp_elf_ex->e_entry);
}

/* Check sanity of IRIX elf executable header. */
static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
{
	if (memcmp(ehp->e_ident, ELFMAG, SELFMAG) != 0)
		return -ENOEXEC;

	/* First of all, some simple consistency checks */
	if ((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
	    !bprm->file->f_op->mmap) {
		return -ENOEXEC;
	}

	/* XXX Don't support N32 or 64bit binaries yet because they can
	 * XXX and do execute 64 bit instructions and expect all registers
	 * XXX to be 64 bit as well.  We need to make the kernel save
	 * XXX all registers as 64bits on cpu's capable of this at
	 * XXX exception time plus frob the XTLB exception vector.
	 */
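	/* EF_MIPS_ABI2 in e_flags marks an N32 binary, so reject it. */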
	if ((ehp->e_flags & EF_MIPS_ABI2))
		return -ENOEXEC;

	return 0;
}

/*
 * This is where the detailed check is performed.  Irix binaries
 * use interpreters with 'libc.so' in the name, so this function
 * can differentiate between Linux and Irix binaries.
 */
static inline int look_for_irix_interpreter(char **name,
					    struct file **interpreter,
					    struct elfhdr *interp_elf_ex,
					    struct elf_phdr *epp,
					    struct linux_binprm *bprm, int pnum)
{
	int i;
	int retval = -EINVAL;
	struct file *file = NULL;

	*name = NULL;
	for (i = 0; i < pnum; i++, epp++) {
		if (epp->p_type != PT_INTERP)
			continue;

		/* It is illegal to have two interpreters for one executable. */
		if (*name != NULL)
			goto out;

		*name = kmalloc(epp->p_filesz + strlen(IRIX_EMUL), GFP_KERNEL);
		if (!*name)
			return -ENOMEM;

		strcpy(*name, IRIX_EMUL);
		retval = kernel_read(bprm->file, epp->p_offset, (*name + 16),
				     epp->p_filesz);
		if (retval < 0)
			goto out;

		file = open_exec(*name);
		if (IS_ERR(file)) {
			retval = PTR_ERR(file);
			goto out;
		}
		retval = kernel_read(file, 0, bprm->buf, 128);
		if (retval < 0)
			goto dput_and_out;

		*interp_elf_ex = *(struct elfhdr *) bprm->buf;
	}
	*interpreter = file;
	return 0;

dput_and_out:
	fput(file);
out:
	kfree(*name);
	return retval;
}

static inline int verify_irix_interpreter(struct elfhdr *ihp)
{
	if (memcmp(ihp->e_ident, ELFMAG, SELFMAG) != 0)
		return -ELIBBAD;
	return 0;
}

#define EXEC_MAP_FLAGS (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE)

static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnum,
				  unsigned int *estack, unsigned int *laddr,
				  unsigned int *scode, unsigned int *ebss,
				  unsigned int *ecode, unsigned int *edata,
				  unsigned int *ebrk)
{
	unsigned int tmp;
	int i, prot;

	for (i = 0; i < pnum; i++, epp++) {
		if (epp->p_type != PT_LOAD)
			continue;

		/* Map it. */
		prot = (epp->p_flags & PF_R) ? PROT_READ : 0;
		prot |= (epp->p_flags & PF_W) ? PROT_WRITE : 0;
		prot |= (epp->p_flags & PF_X) ? PROT_EXEC : 0;
		down_write(&current->mm->mmap_sem);
		(void) do_mmap(fp, (epp->p_vaddr & 0xfffff000),
			       (epp->p_filesz + (epp->p_vaddr & 0xfff)),
			       prot, EXEC_MAP_FLAGS,
			       (epp->p_offset & 0xfffff000));
		up_write(&current->mm->mmap_sem);

		/* Fixup location tracking vars. */
		if ((epp->p_vaddr & 0xfffff000) < *estack)
			*estack = (epp->p_vaddr & 0xfffff000);
		if (!*laddr)
			*laddr = epp->p_vaddr - epp->p_offset;
		if (epp->p_vaddr < *scode)
			*scode = epp->p_vaddr;

		tmp = epp->p_vaddr + epp->p_filesz;
		if (tmp > *ebss)
			*ebss = tmp;
		if ((epp->p_flags & PF_X) && *ecode < tmp)
			*ecode = tmp;
		if (*edata < tmp)
			*edata = tmp;

		tmp = epp->p_vaddr + epp->p_memsz;
		if (tmp > *ebrk)
			*ebrk = tmp;
	}
}

static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
				  struct file *interp, unsigned int *iladdr,
				  int pnum, mm_segment_t old_fs,
				  unsigned int *eentry)
{
	int i;

	*eentry = 0xffffffff;
	for (i = 0; i < pnum; i++, epp++) {
		if (epp->p_type != PT_INTERP)
			continue;

		/* We should have fielded this error elsewhere... */
		if (*eentry != 0xffffffff)
			return -1;

		set_fs(old_fs);
		*eentry = load_irix_interp(ihp, interp, iladdr);
		old_fs = get_fs();
		set_fs(get_ds());

		fput(interp);

		if (*eentry == 0xffffffff)
			return -1;
	}
	return 0;
}

/*
 * IRIX maps a page at 0x200000 that holds information about the
 * process and the system.  Here we map the page and fill in the
 * structure.
 */
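/*
 * Only the prda_sys fields are filled in below; the rest of the freshly
 * mapped page is left zeroed.  (struct prda and PRDA_ADDRESS are assumed
 * to be provided by <asm/prctl.h>, included above.)
 */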
static void irix_map_prda_page(void)
{
	unsigned long v;
	struct prda *pp;

	down_write(&current->mm->mmap_sem);
	v = do_brk(PRDA_ADDRESS, PAGE_SIZE);
	up_write(&current->mm->mmap_sem);

	/* do_brk() reports failure as a negative value; v is unsigned,
	 * so cast before testing. */
	if ((long) v < 0)
		return;

	pp = (struct prda *) v;
	pp->prda_sys.t_pid = current->pid;
	pp->prda_sys.t_prid = read_c0_prid();
	pp->prda_sys.t_rpid = current->pid;

	/* We leave the rest set to zero */
}

/* These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */
static int load_irix_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct elfhdr elf_ex, interp_elf_ex;
	struct file *interpreter;
	struct elf_phdr *elf_phdata, *elf_ihdr, *elf_ephdr;
	unsigned int load_addr, elf_bss, elf_brk;
	unsigned int elf_entry, interp_load_addr = 0;
	unsigned int start_code, end_code, end_data, elf_stack;
	int retval, has_interp, has_ephdr, size, i;
	char *elf_interpreter;
	mm_segment_t old_fs;

	load_addr = 0;
	has_interp = has_ephdr = 0;
	elf_ihdr = elf_ephdr = NULL;
	elf_ex = *((struct elfhdr *) bprm->buf);
	retval = -ENOEXEC;

	if (verify_binary(&elf_ex, bprm))
		goto out;

	/*
	 * Telling -o32 static binaries from Linux and Irix apart from each
	 * other is difficult. There are 2 differences to be noted for static
	 * binaries from the 2 operating systems:
	 *
	 *    1) Irix binaries have their .text section before their .init
	 *       section. Linux binaries are just the opposite.
	 *
	 *    2) Irix binaries usually have <= 12 sections and Linux
	 *       binaries have > 20.
	 *
	 * We will use Method #2 since Method #1 would require us to read in
	 * the section headers which is way too much overhead. This appears
	 * to work for everything we have run into so far. If anyone has a
	 * better method to tell the binaries apart, I'm listening.
	 */
	if (elf_ex.e_shnum > 20)
		goto out;

	print_elfhdr(&elf_ex);

	/* Now read in all of the header information */
	size = elf_ex.e_phentsize * elf_ex.e_phnum;
	if (size > 65536)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (elf_phdata == NULL) {
		retval = -ENOMEM;
		goto out;
	}

	retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *)elf_phdata, size);
	if (retval < 0)
		goto out_free_ph;

	dump_phdrs(elf_phdata, elf_ex.e_phnum);

	/* Set some things for later. */
	for (i = 0; i < elf_ex.e_phnum; i++) {
		switch (elf_phdata[i].p_type) {
		case PT_INTERP:
			has_interp = 1;
			elf_ihdr = &elf_phdata[i];
			break;
		case PT_PHDR:
			has_ephdr = 1;
			elf_ephdr = &elf_phdata[i];
			break;
		}
	}

	pr_debug("\n");

	elf_bss = 0;
	elf_brk = 0;

	elf_stack = 0xffffffff;
	elf_interpreter = NULL;
	start_code = 0xffffffff;
	end_code = 0;
	end_data = 0;

	/*
	 * If we get a return value, we change the value to be ENOEXEC
	 * so that we can exit gracefully and the main binary format
	 * search loop in 'fs/exec.c' will move onto the next handler
	 * which should be the normal ELF binary handler.
	 */
	retval = look_for_irix_interpreter(&elf_interpreter, &interpreter,
					   &interp_elf_ex, elf_phdata, bprm,
					   elf_ex.e_phnum);
	if (retval) {
		retval = -ENOEXEC;
		goto out_free_file;
	}

	if (elf_interpreter) {
		retval = verify_irix_interpreter(&interp_elf_ex);
		if (retval)
			goto out_free_interp;
	}

	/* OK, we are done with that, now set up the arg stuff,
	 * and then start this sucker up.
	 */
	retval = -E2BIG;
	if (!bprm->sh_bang && !bprm->p)
		goto out_free_interp;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* OK, This is the point of no return */
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	elf_entry = (unsigned int) elf_ex.e_entry;

	/* Do this so that we can load the interpreter, if need be.  We will
	 * change some of these later.
	 */
	setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
	current->mm->start_stack = bprm->p;

	/* At this point, we assume that the image should be loaded at
	 * fixed address, not at a variable address.
	 */
	old_fs = get_fs();
	set_fs(get_ds());

	map_executable(bprm->file, elf_phdata, elf_ex.e_phnum, &elf_stack,
		       &load_addr, &start_code, &elf_bss, &end_code,
		       &end_data, &elf_brk);

	if (elf_interpreter) {
		retval = map_interpreter(elf_phdata, &interp_elf_ex,
					 interpreter, &interp_load_addr,
					 elf_ex.e_phnum, old_fs, &elf_entry);
		kfree(elf_interpreter);
		if (retval) {
			set_fs(old_fs);
			printk("Unable to load IRIX ELF interpreter\n");
			send_sig(SIGSEGV, current, 0);
			retval = 0;
			goto out_free_file;
		}
	}

	set_fs(old_fs);

	kfree(elf_phdata);
	set_personality(PER_IRIX32);
	set_binfmt(&irix_format);
	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	bprm->p = (unsigned long)
		create_irix_tables((char *)bprm->p, bprm->argc, bprm->envc,
				   (elf_interpreter ? &elf_ex : NULL),
				   load_addr, interp_load_addr, regs, elf_ephdr);
	current->mm->start_brk = current->mm->brk = elf_brk;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	/* Calling set_brk effectively mmaps the pages that we need for the
	 * bss and break sections.
	 */
	set_brk(elf_bss, elf_brk);

	/*
	 * IRIX maps a page at 0x200000 which holds some system
	 * information.  Programs depend on this.
	 */
	irix_map_prda_page();

	padzero(elf_bss);

	pr_debug("(start_brk) %lx\n", (long) current->mm->start_brk);
	pr_debug("(end_code) %lx\n", (long) current->mm->end_code);
	pr_debug("(start_code) %lx\n", (long) current->mm->start_code);
	pr_debug("(end_data) %lx\n", (long) current->mm->end_data);
	pr_debug("(start_stack) %lx\n", (long) current->mm->start_stack);
	pr_debug("(brk) %lx\n", (long) current->mm->brk);

#if 0 /* XXX No fucking way dude... */
	/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
	 * and some applications "depend" upon this behavior.
	 * Since we do not have the power to recompile these, we
	 * emulate the SVr4 behavior.  Sigh.
	 */
	down_write(&current->mm->mmap_sem);
	(void) do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
		       MAP_FIXED | MAP_PRIVATE, 0);
	up_write(&current->mm->mmap_sem);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (current->ptrace & PT_PTRACED)
		send_sig(SIGTRAP, current, 0);
	return 0;
out:
	return retval;

out_free_dentry:
	allow_write_access(interpreter);
	fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

/* This is really simpleminded and specialized - we are loading an
 * a.out library that is given an ELF header.
 */
static int load_irix_library(struct file *file)
{
	struct elfhdr elf_ex;
	struct elf_phdr *elf_phdata = NULL;
	struct elf_phdr *eppnt;
	unsigned int len = 0;
	int elf_bss = 0;
	int retval;
	unsigned int bss;
	int error;
	int i, j, k;

	error = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (error != sizeof(elf_ex))
		return -ENOEXEC;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		return -ENOEXEC;

	/* First of all, some simple consistency checks. */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !file->f_op->mmap)
		return -ENOEXEC;

	/* Now read in all of the header information. */
	if (sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
		return -ENOEXEC;

	elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
	if (elf_phdata == NULL)
		return -ENOMEM;

	retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata,
			     sizeof(struct elf_phdr) * elf_ex.e_phnum);

	j = 0;
	for (i = 0; i < elf_ex.e_phnum; i++)
		if ((elf_phdata + i)->p_type == PT_LOAD)
			j++;

	if (j != 1) {
		kfree(elf_phdata);
		return -ENOEXEC;
	}

	/* Walk a separate cursor to the single PT_LOAD header so that the
	 * later kfree() still gets the original allocation. */
	eppnt = elf_phdata;
	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			eppnt->p_vaddr & 0xfffff000,
			eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			eppnt->p_offset & 0xfffff000);
	up_write(&current->mm->mmap_sem);

	k = eppnt->p_vaddr + eppnt->p_filesz;
	if (k > elf_bss)
		elf_bss = k;

	if (error != (eppnt->p_vaddr & 0xfffff000)) {
		kfree(elf_phdata);
		return error;
	}

	padzero(elf_bss);

	len = (eppnt->p_filesz + eppnt->p_vaddr + 0xfff) & 0xfffff000;
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	kfree(elf_phdata);
	return 0;
}

/* Called through irix_syssgi() to map an elf image given an FD,
 * a phdr ptr USER_PHDRP in userspace, and a count CNT telling how many
 * phdrs there are in the USER_PHDRP array.  We return the vaddr the
 * first phdr was successfully mapped to.
 */
unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
{
	unsigned long type, vaddr, filesz, offset, flags;
	struct elf_phdr __user *hp;
	struct file *filp;
	int i, retval;

	pr_debug("irix_mapelf: fd[%d] user_phdrp[%p] cnt[%d]\n",
		 fd, user_phdrp, cnt);

	/* First get the verification out of the way. */
	hp = user_phdrp;
	if (!access_ok(VERIFY_READ, hp, (sizeof(struct elf_phdr) * cnt))) {
		pr_debug("irix_mapelf: bad pointer to ELF PHDR!\n");
		return -EFAULT;
	}

	dump_phdrs(user_phdrp, cnt);

	for (i = 0; i < cnt; i++, hp++) {
		if (__get_user(type, &hp->p_type))
			return -EFAULT;
		if (type != PT_LOAD) {
			printk("irix_mapelf: One section is not PT_LOAD!\n");
			return -ENOEXEC;
		}
	}

	filp = fget(fd);
	if (!filp)
		return -EACCES;
	if (!filp->f_op) {
		printk("irix_mapelf: Bogon filp!\n");
		fput(filp);
		return -EACCES;
	}

	hp = user_phdrp;
	for (i = 0; i < cnt; i++, hp++) {
		int prot;

		retval = __get_user(vaddr, &hp->p_vaddr);
		retval |= __get_user(filesz, &hp->p_filesz);
		retval |= __get_user(offset, &hp->p_offset);
		retval |= __get_user(flags, &hp->p_flags);
		if (retval) {
			/* Drop the file reference before bailing out. */
			fput(filp);
			return retval;
		}

		prot = (flags & PF_R) ? PROT_READ : 0;
		prot |= (flags & PF_W) ? PROT_WRITE : 0;
		prot |= (flags & PF_X) ? PROT_EXEC : 0;

		down_write(&current->mm->mmap_sem);
		retval = do_mmap(filp, (vaddr & 0xfffff000),
				 (filesz + (vaddr & 0xfff)),
				 prot, (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
				 (offset & 0xfffff000));
		up_write(&current->mm->mmap_sem);

		if (retval != (vaddr & 0xfffff000)) {
			printk("irix_mapelf: do_mmap fails with %d!\n", retval);
			fput(filp);
			return retval;
		}
	}

	pr_debug("irix_mapelf: Success, returning %08lx\n",
		 (unsigned long) user_phdrp->p_vaddr);

	fput(filp);

	if (__get_user(vaddr, &user_phdrp->p_vaddr))
		return -EFAULT;

	return vaddr;
}

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/* These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void __user *addr, int nr)
{
	return file->f_op->write(file, (const char __user *) addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, off_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else
		file->f_pos = off;
	return 1;
}

/* Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static inline int maydump(struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)))
		return 0;
#if 1
	if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
		return 1;
	if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
		return 0;
#endif
	return 1;
}

/* An ELF note in memory. */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};
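
/* On-disk size of one note: the elf_note header plus the name and the
 * descriptor data, each padded to a 4-byte boundary. */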
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}

#define DUMP_WRITE(addr, nr) \
	if (!dump_write(file, (addr), (nr))) \
		goto end_coredump;

#define DUMP_SEEK(off) \
	if (!dump_seek(file, (off))) \
		goto end_coredump;

static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;

end_coredump:
	return 0;
}
#undef DUMP_WRITE
#undef DUMP_SEEK

#define DUMP_WRITE(addr, nr) \
	if (!dump_write(file, (addr), (nr))) \
		goto end_coredump;

#define DUMP_SEEK(off) \
	if (!dump_seek(file, (off))) \
		goto end_coredump;

/* Actual dumper.
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
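/*
 * Resulting core file layout, in file order: the ELF header, one program
 * header per segment plus one for the notes, the PT_NOTE data
 * (NT_PRSTATUS, NT_PRPSINFO and, if available, NT_PRFPREG), and finally
 * the page-aligned memory contents of each dumpable VMA.
 */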
static int irix_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	int i;
	size_t size;
	struct vm_area_struct *vma;
	struct elfhdr elf;
	off_t offset = 0, dataoff;
	int numnote = 3;
	struct memelfnote notes[3];
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct elf_prpsinfo psinfo;	/* NT_PRPSINFO */

	/* Count what's needed to dump, up to the limit of coredump size. */
	segs = 0;
	size = 0;
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		if (maydump(vma)) {
			int sz = vma->vm_end - vma->vm_start;

			if (size+sz >= limit)
				break;
			else
				size += sz;
		}
		segs++;
	}
	pr_debug("irix_core_dump: %d segs taking %d bytes\n", segs, size);

	/* Set up header. */
	memcpy(elf.e_ident, ELFMAG, SELFMAG);
	elf.e_ident[EI_CLASS] = ELFCLASS32;
	elf.e_ident[EI_DATA] = ELFDATA2LSB;
	elf.e_ident[EI_VERSION] = EV_CURRENT;
	elf.e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf.e_type = ET_CORE;
	elf.e_machine = ELF_ARCH;
	elf.e_version = EV_CURRENT;
	elf.e_entry = 0;
	elf.e_phoff = sizeof(elf);
	elf.e_shoff = 0;
	elf.e_flags = 0;
	elf.e_ehsize = sizeof(elf);
	elf.e_phentsize = sizeof(struct elf_phdr);
	elf.e_phnum = segs+1;	/* Include notes. */
	elf.e_shentsize = 0;
	elf.e_shnum = 0;
	elf.e_shstrndx = 0;

	fs = get_fs();
	set_fs(KERNEL_DS);

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	DUMP_WRITE(&elf, sizeof(elf));
	offset += sizeof(elf);				/* Elf header. */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers. */

	/* Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	memset(&psinfo, 0, sizeof(psinfo));
	memset(&prstatus, 0, sizeof(prstatus));

	notes[0].name = "CORE";
	notes[0].type = NT_PRSTATUS;
	notes[0].datasz = sizeof(prstatus);
	notes[0].data = &prstatus;
	prstatus.pr_info.si_signo = prstatus.pr_cursig = signr;
	prstatus.pr_sigpend = current->pending.signal.sig[0];
	prstatus.pr_sighold = current->blocked.sig[0];
	psinfo.pr_pid = prstatus.pr_pid = current->pid;
	psinfo.pr_ppid = prstatus.pr_ppid = current->parent->pid;
	psinfo.pr_pgrp = prstatus.pr_pgrp = task_pgrp_nr(current);
	psinfo.pr_sid = prstatus.pr_sid = task_session_nr(current);
	if (current->pid == current->tgid) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		jiffies_to_timeval(current->utime + current->signal->utime,
				   &prstatus.pr_utime);
		jiffies_to_timeval(current->stime + current->signal->stime,
				   &prstatus.pr_stime);
	} else {
		jiffies_to_timeval(current->utime, &prstatus.pr_utime);
		jiffies_to_timeval(current->stime, &prstatus.pr_stime);
	}
	jiffies_to_timeval(current->signal->cutime, &prstatus.pr_cutime);
	jiffies_to_timeval(current->signal->cstime, &prstatus.pr_cstime);

	if (sizeof(elf_gregset_t) != sizeof(struct pt_regs)) {
		printk("sizeof(elf_gregset_t) (%d) != sizeof(struct pt_regs) "
		       "(%d)\n", sizeof(elf_gregset_t), sizeof(struct pt_regs));
	} else {
		*(struct pt_regs *)&prstatus.pr_reg = *regs;
	}

	notes[1].name = "CORE";
	notes[1].type = NT_PRPSINFO;
	notes[1].datasz = sizeof(psinfo);
	notes[1].data = &psinfo;
	i = current->state ? ffz(~current->state) + 1 : 0;
	psinfo.pr_state = i;
	psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
	psinfo.pr_zomb = psinfo.pr_sname == 'Z';
	psinfo.pr_nice = task_nice(current);
	psinfo.pr_flag = current->flags;
	psinfo.pr_uid = current->uid;
	psinfo.pr_gid = current->gid;
	{
		int i, len;

		set_fs(fs);

		len = current->mm->arg_end - current->mm->arg_start;
		len = len >= ELF_PRARGSZ ? ELF_PRARGSZ : len;
		(void) copy_from_user(&psinfo.pr_psargs,
			(const char __user *)current->mm->arg_start, len);
		for (i = 0; i < len; i++)
			if (psinfo.pr_psargs[i] == 0)
				psinfo.pr_psargs[i] = ' ';
		psinfo.pr_psargs[len] = 0;

		set_fs(KERNEL_DS);
	}
	strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));

	/* Try to dump the FPU. */
	prstatus.pr_fpvalid = dump_fpu(regs, &fpu);
	if (!prstatus.pr_fpvalid) {
		numnote--;
	} else {
		notes[2].name = "CORE";
		notes[2].type = NT_PRFPREG;
		notes[2].datasz = sizeof(fpu);
		notes[2].data = &fpu;
	}

	/* Write notes phdr entry. */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(&notes[i]);

		phdr.p_type = PT_NOTE;
		phdr.p_offset = offset;
		phdr.p_vaddr = 0;
		phdr.p_paddr = 0;
		phdr.p_filesz = sz;
		phdr.p_memsz = 0;
		phdr.p_flags = 0;
		phdr.p_align = 0;

		offset += phdr.p_filesz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data. */
	dataoff = offset = roundup(offset, PAGE_SIZE);

	/* Write program headers for segments dump. */
	for (vma = current->mm->mmap, i = 0;
	     i < segs && vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		i++;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = PAGE_SIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	for (i = 0; i < numnote; i++)
		if (!writenote(&notes[i], file))
			goto end_coredump;

	set_fs(fs);

	DUMP_SEEK(dataoff);

	for (i = 0, vma = current->mm->mmap;
	     i < segs && vma != NULL;
	     vma = vma->vm_next) {
		unsigned long addr = vma->vm_start;
		unsigned long len = vma->vm_end - vma->vm_start;

		if (!maydump(vma))
			continue;

		i++;

		pr_debug("elf_core_dump: writing %08lx %lx\n", addr, len);

		DUMP_WRITE((void __user *)addr, len);
	}

	if ((off_t) file->f_pos != offset) {
		/* Sanity check. */
		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t) file->f_pos, offset);
	}

end_coredump:
	set_fs(fs);
	return has_dumped;
}

static int __init init_irix_binfmt(void)
{
	extern int init_inventory(void);
	extern asmlinkage unsigned long sys_call_table;
	extern asmlinkage unsigned long sys_call_table_irix5;

	init_inventory();

	/*
	 * Copy the IRIX5 syscall table (8000 bytes) into the main syscall
	 * table.  The IRIX5 calls are located by an offset of 8000 bytes
	 * from the beginning of the main table.
	 */
	memcpy((void *) ((unsigned long) &sys_call_table + 8000),
	       &sys_call_table_irix5, 8000);

	return register_binfmt(&irix_format);
}

static void __exit exit_irix_binfmt(void)
{
	/*
	 * Remove the Irix ELF loader.
	 */
	unregister_binfmt(&irix_format);
}

module_init(init_irix_binfmt)
module_exit(exit_irix_binfmt)